diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index def441a3..00000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,80 +0,0 @@ -# Use the latest 2.1 version of CircleCI pipeline process engine. -# See: https://circleci.com/docs/2.0/configuration-reference -version: 2.1 - -# Orbs are reusable packages of CircleCI configuration that you may share across projects, enabling you to create encapsulated, parameterized commands, jobs, and executors that can be used across multiple projects. -# See: https://circleci.com/docs/2.0/orb-intro/ -orbs: - # The python orb contains a set of prepackaged CircleCI configuration you can use repeatedly in your configuration files - # Orb commands and jobs help you with common scripting around a language/tool - # so you dont have to copy and paste it everywhere. - # See the orb documentation here: https://circleci.com/developer/orbs/orb/circleci/python - python: circleci/python@1.5.0 - -# Define a job to be invoked later in a workflow. -# See: https://circleci.com/docs/2.0/configuration-reference/#jobs -jobs: - build-and-test: # This is the name of the job, feel free to change it to better match what you're trying to do! - # These next lines defines a Docker executors: https://circleci.com/docs/2.0/executor-types/ - # You can specify an image from Dockerhub or use one of the convenience images from CircleCI's Developer Hub - # A list of available CircleCI Docker convenience images are available here: https://circleci.com/developer/images/image/cimg/python - # The executor is the environment in which the steps below will be executed - below will use a python 3.10.2 container - # Change the version below to your required version of python - docker: - # Important: Don't change this otherwise we will stop testing the earliest - # python version we have to support. 
- - image: python:3.8-bullseye - resource_class: small - steps: - - checkout # checkout source code to working directory - - run: - name: Install Environment Dependencies - command: | # install dependencies - apt-get -y install curl - pip install --upgrade pip - pip install poetry - poetry install --no-ansi - - - run: - name: Black Formatting Check # Only validation, without re-formatting - command: | - poetry run black --check -t py36 launch - - run: - name: Ruff Lint Check # Uses pyproject.toml for configuration - command: | - poetry run ruff launch - - run: - name: Pylint Lint Check # Uses .pylintrc for configuration - command: | - poetry run pylint launch --ignore=api_client,openapi_client - - run: - name: MyPy typing check - command: | - poetry run mypy --ignore-missing-imports launch --exclude launch/api_client --exclude launch/openapi_client - - run: - name: Isort Import Formatting Check # Only validation, without re-formatting - command: | - poetry run isort --check-only launch - - run: - name: Pytest Test Cases - command: | - mkdir test_results - set -e - TEST_FILES=$(circleci tests glob "tests/**/test_*.py") - poetry run coverage run --include=launch/* -m pytest $TEST_FILES - poetry run coverage report - poetry run coverage html - - store_test_results: - path: htmlcov - - store_test_results: - path: test_results - - store_artifacts: - path: test_results - -# Invoke jobs via workflows -# See: https://circleci.com/docs/2.0/configuration-reference/#workflows -workflows: - build_and_test: # This is the name of the workflow, feel free to change it to better match your workflow. - # Inside the workflow, you define the jobs you want to run. 
- jobs: - - build-and-test diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 568870df..00000000 --- a/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -# python -**/__pycache__/ - -# editor -.vscode -**/.idea/ -**/scratch/ -*.swp -.vim/ - -# Sphinx documentation -docs/_sources -docs/.doctrees - -site/ - -.DS_Store diff --git a/.isort.cfg b/.isort.cfg deleted file mode 100644 index c7c32c0a..00000000 --- a/.isort.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[settings] -profile=black -# Enforced in .flake8 -line_length=79 diff --git a/docs/.nojekyll b/.nojekyll similarity index 100% rename from docs/.nojekyll rename to .nojekyll diff --git a/.openapi-generator-ignore b/.openapi-generator-ignore deleted file mode 100644 index 140a9113..00000000 --- a/.openapi-generator-ignore +++ /dev/null @@ -1,19 +0,0 @@ -# OpenAPI Generator Ignore -# Files listed here will not be overwritten by the generator - -# Custom wrapper code -launch/__init__.py -launch/client.py -launch/model_endpoint.py - -# Project files we maintain manually -README.md -.gitignore -requirements.txt - -# Type stub files (have syntax errors due to generator bug) -**/*.pyi - -# Generated docs (have invalid Python syntax in examples due to generator bug) -launch/api_client_README.md -launch/api_client/docs/** diff --git a/.openapi-generator/FILES b/.openapi-generator/FILES deleted file mode 100644 index 3b4c5ac1..00000000 --- a/.openapi-generator/FILES +++ /dev/null @@ -1,456 +0,0 @@ -launch/api_client/__init__.py -launch/api_client/api_client.py -launch/api_client/apis/__init__.py -launch/api_client/apis/tags/default_api.py -launch/api_client/configuration.py -launch/api_client/docs/apis/tags/DefaultApi.md -launch/api_client/docs/models/Annotation.md -launch/api_client/docs/models/Audio.md -launch/api_client/docs/models/Audio1.md -launch/api_client/docs/models/Audio2.md -launch/api_client/docs/models/BatchCompletionsJob.md -launch/api_client/docs/models/BatchCompletionsJobStatus.md 
-launch/api_client/docs/models/BatchCompletionsModelConfig.md -launch/api_client/docs/models/BatchJobSerializationFormat.md -launch/api_client/docs/models/BatchJobStatus.md -launch/api_client/docs/models/CallbackAuth.md -launch/api_client/docs/models/CallbackBasicAuth.md -launch/api_client/docs/models/CallbackmTLSAuth.md -launch/api_client/docs/models/CancelBatchCompletionsV2Response.md -launch/api_client/docs/models/CancelFineTuneResponse.md -launch/api_client/docs/models/ChatCompletionFunctionCallOption.md -launch/api_client/docs/models/ChatCompletionFunctions.md -launch/api_client/docs/models/ChatCompletionMessageToolCall.md -launch/api_client/docs/models/ChatCompletionMessageToolCallChunk.md -launch/api_client/docs/models/ChatCompletionMessageToolCallsInput.md -launch/api_client/docs/models/ChatCompletionMessageToolCallsOutput.md -launch/api_client/docs/models/ChatCompletionNamedToolChoice.md -launch/api_client/docs/models/ChatCompletionRequestAssistantMessage.md -launch/api_client/docs/models/ChatCompletionRequestAssistantMessageContentPart.md -launch/api_client/docs/models/ChatCompletionRequestDeveloperMessage.md -launch/api_client/docs/models/ChatCompletionRequestFunctionMessage.md -launch/api_client/docs/models/ChatCompletionRequestMessage.md -launch/api_client/docs/models/ChatCompletionRequestMessageContentPartAudio.md -launch/api_client/docs/models/ChatCompletionRequestMessageContentPartFile.md -launch/api_client/docs/models/ChatCompletionRequestMessageContentPartImage.md -launch/api_client/docs/models/ChatCompletionRequestMessageContentPartRefusal.md -launch/api_client/docs/models/ChatCompletionRequestMessageContentPartText.md -launch/api_client/docs/models/ChatCompletionRequestSystemMessage.md -launch/api_client/docs/models/ChatCompletionRequestSystemMessageContentPart.md -launch/api_client/docs/models/ChatCompletionRequestToolMessage.md -launch/api_client/docs/models/ChatCompletionRequestToolMessageContentPart.md 
-launch/api_client/docs/models/ChatCompletionRequestUserMessage.md -launch/api_client/docs/models/ChatCompletionRequestUserMessageContentPart.md -launch/api_client/docs/models/ChatCompletionResponseMessage.md -launch/api_client/docs/models/ChatCompletionStreamOptions.md -launch/api_client/docs/models/ChatCompletionStreamResponseDelta.md -launch/api_client/docs/models/ChatCompletionTokenLogprob.md -launch/api_client/docs/models/ChatCompletionTool.md -launch/api_client/docs/models/ChatCompletionToolChoiceOption.md -launch/api_client/docs/models/ChatCompletionV2Request.md -launch/api_client/docs/models/ChatCompletionV2StreamErrorChunk.md -launch/api_client/docs/models/Choice.md -launch/api_client/docs/models/Choice1.md -launch/api_client/docs/models/Choice2.md -launch/api_client/docs/models/CloneModelBundleV1Request.md -launch/api_client/docs/models/CloneModelBundleV2Request.md -launch/api_client/docs/models/CloudpickleArtifactFlavor.md -launch/api_client/docs/models/CompletionOutput.md -launch/api_client/docs/models/CompletionStreamOutput.md -launch/api_client/docs/models/CompletionStreamV1Request.md -launch/api_client/docs/models/CompletionStreamV1Response.md -launch/api_client/docs/models/CompletionSyncV1Request.md -launch/api_client/docs/models/CompletionSyncV1Response.md -launch/api_client/docs/models/CompletionTokensDetails.md -launch/api_client/docs/models/CompletionUsage.md -launch/api_client/docs/models/CompletionV2Request.md -launch/api_client/docs/models/CompletionV2StreamErrorChunk.md -launch/api_client/docs/models/Content.md -launch/api_client/docs/models/Content1.md -launch/api_client/docs/models/Content2.md -launch/api_client/docs/models/Content3.md -launch/api_client/docs/models/Content4.md -launch/api_client/docs/models/Content8.md -launch/api_client/docs/models/CreateAsyncTaskV1Response.md -launch/api_client/docs/models/CreateBatchCompletionsV1ModelConfig.md -launch/api_client/docs/models/CreateBatchCompletionsV1Request.md 
-launch/api_client/docs/models/CreateBatchCompletionsV1RequestContent.md -launch/api_client/docs/models/CreateBatchCompletionsV1Response.md -launch/api_client/docs/models/CreateBatchCompletionsV2Request.md -launch/api_client/docs/models/CreateBatchJobResourceRequests.md -launch/api_client/docs/models/CreateBatchJobV1Request.md -launch/api_client/docs/models/CreateBatchJobV1Response.md -launch/api_client/docs/models/CreateChatCompletionResponse.md -launch/api_client/docs/models/CreateChatCompletionStreamResponse.md -launch/api_client/docs/models/CreateCompletionResponse.md -launch/api_client/docs/models/CreateDeepSpeedModelEndpointRequest.md -launch/api_client/docs/models/CreateDockerImageBatchJobBundleV1Request.md -launch/api_client/docs/models/CreateDockerImageBatchJobBundleV1Response.md -launch/api_client/docs/models/CreateDockerImageBatchJobResourceRequests.md -launch/api_client/docs/models/CreateDockerImageBatchJobV1Request.md -launch/api_client/docs/models/CreateDockerImageBatchJobV1Response.md -launch/api_client/docs/models/CreateFineTuneRequest.md -launch/api_client/docs/models/CreateFineTuneResponse.md -launch/api_client/docs/models/CreateLLMModelEndpointV1Request.md -launch/api_client/docs/models/CreateLLMModelEndpointV1Response.md -launch/api_client/docs/models/CreateLightLLMModelEndpointRequest.md -launch/api_client/docs/models/CreateModelBundleV1Request.md -launch/api_client/docs/models/CreateModelBundleV1Response.md -launch/api_client/docs/models/CreateModelBundleV2Request.md -launch/api_client/docs/models/CreateModelBundleV2Response.md -launch/api_client/docs/models/CreateModelEndpointV1Request.md -launch/api_client/docs/models/CreateModelEndpointV1Response.md -launch/api_client/docs/models/CreateSGLangModelEndpointRequest.md -launch/api_client/docs/models/CreateTensorRTLLMModelEndpointRequest.md -launch/api_client/docs/models/CreateTextGenerationInferenceModelEndpointRequest.md -launch/api_client/docs/models/CreateTriggerV1Request.md 
-launch/api_client/docs/models/CreateTriggerV1Response.md -launch/api_client/docs/models/CreateVLLMModelEndpointRequest.md -launch/api_client/docs/models/CustomFramework.md -launch/api_client/docs/models/DeleteFileResponse.md -launch/api_client/docs/models/DeleteLLMEndpointResponse.md -launch/api_client/docs/models/DeleteModelEndpointV1Response.md -launch/api_client/docs/models/DeleteTriggerV1Response.md -launch/api_client/docs/models/DockerImageBatchJob.md -launch/api_client/docs/models/DockerImageBatchJobBundleV1Response.md -launch/api_client/docs/models/EndpointPredictV1Request.md -launch/api_client/docs/models/File.md -launch/api_client/docs/models/FilteredChatCompletionV2Request.md -launch/api_client/docs/models/FilteredCompletionV2Request.md -launch/api_client/docs/models/Function1.md -launch/api_client/docs/models/Function2.md -launch/api_client/docs/models/Function3.md -launch/api_client/docs/models/FunctionCall.md -launch/api_client/docs/models/FunctionCall2.md -launch/api_client/docs/models/FunctionObject.md -launch/api_client/docs/models/FunctionParameters.md -launch/api_client/docs/models/GetAsyncTaskV1Response.md -launch/api_client/docs/models/GetBatchCompletionV2Response.md -launch/api_client/docs/models/GetBatchJobV1Response.md -launch/api_client/docs/models/GetDockerImageBatchJobV1Response.md -launch/api_client/docs/models/GetFileContentResponse.md -launch/api_client/docs/models/GetFileResponse.md -launch/api_client/docs/models/GetFineTuneEventsResponse.md -launch/api_client/docs/models/GetFineTuneResponse.md -launch/api_client/docs/models/GetLLMModelEndpointV1Response.md -launch/api_client/docs/models/GetModelEndpointV1Response.md -launch/api_client/docs/models/GetTriggerV1Response.md -launch/api_client/docs/models/GpuType.md -launch/api_client/docs/models/HTTPValidationError.md -launch/api_client/docs/models/ImageUrl.md -launch/api_client/docs/models/InputAudio.md -launch/api_client/docs/models/JsonSchema.md 
-launch/api_client/docs/models/LLMFineTuneEvent.md -launch/api_client/docs/models/LLMInferenceFramework.md -launch/api_client/docs/models/LLMSource.md -launch/api_client/docs/models/ListDockerImageBatchJobBundleV1Response.md -launch/api_client/docs/models/ListDockerImageBatchJobsV1Response.md -launch/api_client/docs/models/ListFilesResponse.md -launch/api_client/docs/models/ListFineTunesResponse.md -launch/api_client/docs/models/ListLLMModelEndpointsV1Response.md -launch/api_client/docs/models/ListModelBundlesV1Response.md -launch/api_client/docs/models/ListModelBundlesV2Response.md -launch/api_client/docs/models/ListModelEndpointsV1Response.md -launch/api_client/docs/models/ListTriggersV1Response.md -launch/api_client/docs/models/Logprobs.md -launch/api_client/docs/models/Logprobs2.md -launch/api_client/docs/models/Metadata.md -launch/api_client/docs/models/ModelBundleEnvironmentParams.md -launch/api_client/docs/models/ModelBundleFrameworkType.md -launch/api_client/docs/models/ModelBundleOrderBy.md -launch/api_client/docs/models/ModelBundlePackagingType.md -launch/api_client/docs/models/ModelBundleV1Response.md -launch/api_client/docs/models/ModelBundleV2Response.md -launch/api_client/docs/models/ModelDownloadRequest.md -launch/api_client/docs/models/ModelDownloadResponse.md -launch/api_client/docs/models/ModelEndpointDeploymentState.md -launch/api_client/docs/models/ModelEndpointOrderBy.md -launch/api_client/docs/models/ModelEndpointResourceState.md -launch/api_client/docs/models/ModelEndpointStatus.md -launch/api_client/docs/models/ModelEndpointType.md -launch/api_client/docs/models/ParallelToolCalls.md -launch/api_client/docs/models/PredictionContent.md -launch/api_client/docs/models/Prompt.md -launch/api_client/docs/models/Prompt1.md -launch/api_client/docs/models/Prompt1Item.md -launch/api_client/docs/models/PromptTokensDetails.md -launch/api_client/docs/models/PytorchFramework.md -launch/api_client/docs/models/Quantization.md 
-launch/api_client/docs/models/ReasoningEffort.md -launch/api_client/docs/models/RequestSchema.md -launch/api_client/docs/models/ResponseFormatJsonObject.md -launch/api_client/docs/models/ResponseFormatJsonSchema.md -launch/api_client/docs/models/ResponseFormatJsonSchemaSchema.md -launch/api_client/docs/models/ResponseFormatText.md -launch/api_client/docs/models/ResponseModalities.md -launch/api_client/docs/models/ResponseSchema.md -launch/api_client/docs/models/RestartModelEndpointV1Response.md -launch/api_client/docs/models/RunnableImageFlavor.md -launch/api_client/docs/models/ServiceTier.md -launch/api_client/docs/models/StopConfiguration.md -launch/api_client/docs/models/StopConfiguration1.md -launch/api_client/docs/models/StreamError.md -launch/api_client/docs/models/StreamErrorContent.md -launch/api_client/docs/models/StreamingEnhancedRunnableImageFlavor.md -launch/api_client/docs/models/SyncEndpointPredictV1Request.md -launch/api_client/docs/models/SyncEndpointPredictV1Response.md -launch/api_client/docs/models/TaskStatus.md -launch/api_client/docs/models/TensorflowFramework.md -launch/api_client/docs/models/TokenOutput.md -launch/api_client/docs/models/ToolConfig.md -launch/api_client/docs/models/TopLogprob.md -launch/api_client/docs/models/TritonEnhancedRunnableImageFlavor.md -launch/api_client/docs/models/UpdateBatchCompletionsV2Request.md -launch/api_client/docs/models/UpdateBatchCompletionsV2Response.md -launch/api_client/docs/models/UpdateBatchJobV1Request.md -launch/api_client/docs/models/UpdateBatchJobV1Response.md -launch/api_client/docs/models/UpdateDeepSpeedModelEndpointRequest.md -launch/api_client/docs/models/UpdateDockerImageBatchJobV1Request.md -launch/api_client/docs/models/UpdateDockerImageBatchJobV1Response.md -launch/api_client/docs/models/UpdateLLMModelEndpointV1Request.md -launch/api_client/docs/models/UpdateLLMModelEndpointV1Response.md -launch/api_client/docs/models/UpdateModelEndpointV1Request.md 
-launch/api_client/docs/models/UpdateModelEndpointV1Response.md -launch/api_client/docs/models/UpdateSGLangModelEndpointRequest.md -launch/api_client/docs/models/UpdateTextGenerationInferenceModelEndpointRequest.md -launch/api_client/docs/models/UpdateTriggerV1Request.md -launch/api_client/docs/models/UpdateTriggerV1Response.md -launch/api_client/docs/models/UpdateVLLMModelEndpointRequest.md -launch/api_client/docs/models/UploadFileResponse.md -launch/api_client/docs/models/UrlCitation.md -launch/api_client/docs/models/UserLocation.md -launch/api_client/docs/models/ValidationError.md -launch/api_client/docs/models/VoiceIdsShared.md -launch/api_client/docs/models/WebSearchContextSize.md -launch/api_client/docs/models/WebSearchLocation.md -launch/api_client/docs/models/WebSearchOptions.md -launch/api_client/docs/models/ZipArtifactFlavor.md -launch/api_client/exceptions.py -launch/api_client/model/__init__.py -launch/api_client/model/annotation.py -launch/api_client/model/audio.py -launch/api_client/model/audio1.py -launch/api_client/model/audio2.py -launch/api_client/model/batch_completions_job.py -launch/api_client/model/batch_completions_job_status.py -launch/api_client/model/batch_completions_model_config.py -launch/api_client/model/batch_job_serialization_format.py -launch/api_client/model/batch_job_status.py -launch/api_client/model/callback_auth.py -launch/api_client/model/callback_basic_auth.py -launch/api_client/model/callbackm_tls_auth.py -launch/api_client/model/cancel_batch_completions_v2_response.py -launch/api_client/model/cancel_fine_tune_response.py -launch/api_client/model/chat_completion_function_call_option.py -launch/api_client/model/chat_completion_functions.py -launch/api_client/model/chat_completion_message_tool_call.py -launch/api_client/model/chat_completion_message_tool_call_chunk.py -launch/api_client/model/chat_completion_message_tool_calls_input.py -launch/api_client/model/chat_completion_message_tool_calls_output.py 
-launch/api_client/model/chat_completion_named_tool_choice.py -launch/api_client/model/chat_completion_request_assistant_message.py -launch/api_client/model/chat_completion_request_assistant_message_content_part.py -launch/api_client/model/chat_completion_request_developer_message.py -launch/api_client/model/chat_completion_request_function_message.py -launch/api_client/model/chat_completion_request_message.py -launch/api_client/model/chat_completion_request_message_content_part_audio.py -launch/api_client/model/chat_completion_request_message_content_part_file.py -launch/api_client/model/chat_completion_request_message_content_part_image.py -launch/api_client/model/chat_completion_request_message_content_part_refusal.py -launch/api_client/model/chat_completion_request_message_content_part_text.py -launch/api_client/model/chat_completion_request_system_message.py -launch/api_client/model/chat_completion_request_system_message_content_part.py -launch/api_client/model/chat_completion_request_tool_message.py -launch/api_client/model/chat_completion_request_tool_message_content_part.py -launch/api_client/model/chat_completion_request_user_message.py -launch/api_client/model/chat_completion_request_user_message_content_part.py -launch/api_client/model/chat_completion_response_message.py -launch/api_client/model/chat_completion_stream_options.py -launch/api_client/model/chat_completion_stream_response_delta.py -launch/api_client/model/chat_completion_token_logprob.py -launch/api_client/model/chat_completion_tool.py -launch/api_client/model/chat_completion_tool_choice_option.py -launch/api_client/model/chat_completion_v2_request.py -launch/api_client/model/chat_completion_v2_stream_error_chunk.py -launch/api_client/model/choice.py -launch/api_client/model/choice1.py -launch/api_client/model/choice2.py -launch/api_client/model/clone_model_bundle_v1_request.py -launch/api_client/model/clone_model_bundle_v2_request.py -launch/api_client/model/cloudpickle_artifact_flavor.py 
-launch/api_client/model/completion_output.py -launch/api_client/model/completion_stream_output.py -launch/api_client/model/completion_stream_v1_request.py -launch/api_client/model/completion_stream_v1_response.py -launch/api_client/model/completion_sync_v1_request.py -launch/api_client/model/completion_sync_v1_response.py -launch/api_client/model/completion_tokens_details.py -launch/api_client/model/completion_usage.py -launch/api_client/model/completion_v2_request.py -launch/api_client/model/completion_v2_stream_error_chunk.py -launch/api_client/model/content.py -launch/api_client/model/content1.py -launch/api_client/model/content2.py -launch/api_client/model/content3.py -launch/api_client/model/content4.py -launch/api_client/model/content8.py -launch/api_client/model/create_async_task_v1_response.py -launch/api_client/model/create_batch_completions_v1_model_config.py -launch/api_client/model/create_batch_completions_v1_request.py -launch/api_client/model/create_batch_completions_v1_request_content.py -launch/api_client/model/create_batch_completions_v1_response.py -launch/api_client/model/create_batch_completions_v2_request.py -launch/api_client/model/create_batch_job_resource_requests.py -launch/api_client/model/create_batch_job_v1_request.py -launch/api_client/model/create_batch_job_v1_response.py -launch/api_client/model/create_chat_completion_response.py -launch/api_client/model/create_chat_completion_stream_response.py -launch/api_client/model/create_completion_response.py -launch/api_client/model/create_deep_speed_model_endpoint_request.py -launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.py -launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.py -launch/api_client/model/create_docker_image_batch_job_resource_requests.py -launch/api_client/model/create_docker_image_batch_job_v1_request.py -launch/api_client/model/create_docker_image_batch_job_v1_response.py -launch/api_client/model/create_fine_tune_request.py 
-launch/api_client/model/create_fine_tune_response.py -launch/api_client/model/create_light_llm_model_endpoint_request.py -launch/api_client/model/create_llm_model_endpoint_v1_request.py -launch/api_client/model/create_llm_model_endpoint_v1_response.py -launch/api_client/model/create_model_bundle_v1_request.py -launch/api_client/model/create_model_bundle_v1_response.py -launch/api_client/model/create_model_bundle_v2_request.py -launch/api_client/model/create_model_bundle_v2_response.py -launch/api_client/model/create_model_endpoint_v1_request.py -launch/api_client/model/create_model_endpoint_v1_response.py -launch/api_client/model/create_sg_lang_model_endpoint_request.py -launch/api_client/model/create_tensor_rtllm_model_endpoint_request.py -launch/api_client/model/create_text_generation_inference_model_endpoint_request.py -launch/api_client/model/create_trigger_v1_request.py -launch/api_client/model/create_trigger_v1_response.py -launch/api_client/model/create_vllm_model_endpoint_request.py -launch/api_client/model/custom_framework.py -launch/api_client/model/delete_file_response.py -launch/api_client/model/delete_llm_endpoint_response.py -launch/api_client/model/delete_model_endpoint_v1_response.py -launch/api_client/model/delete_trigger_v1_response.py -launch/api_client/model/docker_image_batch_job.py -launch/api_client/model/docker_image_batch_job_bundle_v1_response.py -launch/api_client/model/endpoint_predict_v1_request.py -launch/api_client/model/file.py -launch/api_client/model/filtered_chat_completion_v2_request.py -launch/api_client/model/filtered_completion_v2_request.py -launch/api_client/model/function1.py -launch/api_client/model/function2.py -launch/api_client/model/function3.py -launch/api_client/model/function_call.py -launch/api_client/model/function_call2.py -launch/api_client/model/function_object.py -launch/api_client/model/function_parameters.py -launch/api_client/model/get_async_task_v1_response.py 
-launch/api_client/model/get_batch_completion_v2_response.py -launch/api_client/model/get_batch_job_v1_response.py -launch/api_client/model/get_docker_image_batch_job_v1_response.py -launch/api_client/model/get_file_content_response.py -launch/api_client/model/get_file_response.py -launch/api_client/model/get_fine_tune_events_response.py -launch/api_client/model/get_fine_tune_response.py -launch/api_client/model/get_llm_model_endpoint_v1_response.py -launch/api_client/model/get_model_endpoint_v1_response.py -launch/api_client/model/get_trigger_v1_response.py -launch/api_client/model/gpu_type.py -launch/api_client/model/http_validation_error.py -launch/api_client/model/image_url.py -launch/api_client/model/input_audio.py -launch/api_client/model/json_schema.py -launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.py -launch/api_client/model/list_docker_image_batch_jobs_v1_response.py -launch/api_client/model/list_files_response.py -launch/api_client/model/list_fine_tunes_response.py -launch/api_client/model/list_llm_model_endpoints_v1_response.py -launch/api_client/model/list_model_bundles_v1_response.py -launch/api_client/model/list_model_bundles_v2_response.py -launch/api_client/model/list_model_endpoints_v1_response.py -launch/api_client/model/list_triggers_v1_response.py -launch/api_client/model/llm_fine_tune_event.py -launch/api_client/model/llm_inference_framework.py -launch/api_client/model/llm_source.py -launch/api_client/model/logprobs.py -launch/api_client/model/logprobs2.py -launch/api_client/model/metadata.py -launch/api_client/model/model_bundle_environment_params.py -launch/api_client/model/model_bundle_framework_type.py -launch/api_client/model/model_bundle_order_by.py -launch/api_client/model/model_bundle_packaging_type.py -launch/api_client/model/model_bundle_v1_response.py -launch/api_client/model/model_bundle_v2_response.py -launch/api_client/model/model_download_request.py -launch/api_client/model/model_download_response.py 
-launch/api_client/model/model_endpoint_deployment_state.py -launch/api_client/model/model_endpoint_order_by.py -launch/api_client/model/model_endpoint_resource_state.py -launch/api_client/model/model_endpoint_status.py -launch/api_client/model/model_endpoint_type.py -launch/api_client/model/parallel_tool_calls.py -launch/api_client/model/prediction_content.py -launch/api_client/model/prompt.py -launch/api_client/model/prompt1.py -launch/api_client/model/prompt1_item.py -launch/api_client/model/prompt_tokens_details.py -launch/api_client/model/pytorch_framework.py -launch/api_client/model/quantization.py -launch/api_client/model/reasoning_effort.py -launch/api_client/model/request_schema.py -launch/api_client/model/response_format_json_object.py -launch/api_client/model/response_format_json_schema.py -launch/api_client/model/response_format_json_schema_schema.py -launch/api_client/model/response_format_text.py -launch/api_client/model/response_modalities.py -launch/api_client/model/response_schema.py -launch/api_client/model/restart_model_endpoint_v1_response.py -launch/api_client/model/runnable_image_flavor.py -launch/api_client/model/service_tier.py -launch/api_client/model/stop_configuration.py -launch/api_client/model/stop_configuration1.py -launch/api_client/model/stream_error.py -launch/api_client/model/stream_error_content.py -launch/api_client/model/streaming_enhanced_runnable_image_flavor.py -launch/api_client/model/sync_endpoint_predict_v1_request.py -launch/api_client/model/sync_endpoint_predict_v1_response.py -launch/api_client/model/task_status.py -launch/api_client/model/tensorflow_framework.py -launch/api_client/model/token_output.py -launch/api_client/model/tool_config.py -launch/api_client/model/top_logprob.py -launch/api_client/model/triton_enhanced_runnable_image_flavor.py -launch/api_client/model/update_batch_completions_v2_request.py -launch/api_client/model/update_batch_completions_v2_response.py 
-launch/api_client/model/update_batch_job_v1_request.py -launch/api_client/model/update_batch_job_v1_response.py -launch/api_client/model/update_deep_speed_model_endpoint_request.py -launch/api_client/model/update_docker_image_batch_job_v1_request.py -launch/api_client/model/update_docker_image_batch_job_v1_response.py -launch/api_client/model/update_llm_model_endpoint_v1_request.py -launch/api_client/model/update_llm_model_endpoint_v1_response.py -launch/api_client/model/update_model_endpoint_v1_request.py -launch/api_client/model/update_model_endpoint_v1_response.py -launch/api_client/model/update_sg_lang_model_endpoint_request.py -launch/api_client/model/update_text_generation_inference_model_endpoint_request.py -launch/api_client/model/update_trigger_v1_request.py -launch/api_client/model/update_trigger_v1_response.py -launch/api_client/model/update_vllm_model_endpoint_request.py -launch/api_client/model/upload_file_response.py -launch/api_client/model/url_citation.py -launch/api_client/model/user_location.py -launch/api_client/model/validation_error.py -launch/api_client/model/voice_ids_shared.py -launch/api_client/model/web_search_context_size.py -launch/api_client/model/web_search_location.py -launch/api_client/model/web_search_options.py -launch/api_client/model/zip_artifact_flavor.py -launch/api_client/models/__init__.py -launch/api_client/rest.py -launch/api_client/schemas.py -launch/api_client/test/__init__.py -launch/api_client/test/test_models/__init__.py -launch/api_client_README.md diff --git a/.openapi-generator/VERSION b/.openapi-generator/VERSION deleted file mode 100644 index c0be8a79..00000000 --- a/.openapi-generator/VERSION +++ /dev/null @@ -1 +0,0 @@ -6.4.0 \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 477ceb1f..00000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,41 +0,0 @@ -fail_fast: false -repos: -- repo: local - hooks: - - id: system - name: Black - entry: 
poetry run black . - pass_filenames: false - language: system - -- repo: local - hooks: - - id: system - name: ruff - entry: poetry run ruff launch - pass_filenames: false - language: system - -- repo: local - hooks: - - id: system - name: isort - entry: poetry run isort . - pass_filenames: false - language: system - -- repo: local - hooks: - - id: system - name: pylint - entry: poetry run pylint launch - pass_filenames: false - language: system - -- repo: local - hooks: - - id: system - name: mypy - entry: poetry run mypy --ignore-missing-imports launch - pass_filenames: false - language: system diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 439abffd..00000000 --- a/.pylintrc +++ /dev/null @@ -1,31 +0,0 @@ -[tool.pylint.MESSAGE_CONTROL] -disable= - no-else-return, - too-few-public-methods, - line-too-long, - duplicate-code, - import-error, - unused-argument, - import-outside-toplevel, - too-many-instance-attributes, - no-member, - W3101, - R1735, - W0511, - R0914, - R0913, - C0114, - C0111, - C0103, - R0904 - -[tool.pylint.REPORTS] -reports=no - -[tool.pylint.FORMAT] -max-line-length=79 - -[MASTER] -# Ignore anything inside launch/clientlib (since it's documentation) -ignore=clientlib,api_client -extension-pkg-whitelist=pydantic diff --git a/404.html b/404.html new file mode 100644 index 00000000..050a34bf --- /dev/null +++ b/404.html @@ -0,0 +1,619 @@ + + + + + + + + + + + + + + + + + + + + Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/README.md b/README.md deleted file mode 100644 index eda2cb2c..00000000 --- a/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# Launch Python Client -``` -██╗ █████╗ ██╗ ██╗███╗ ██╗ ██████╗██╗ ██╗ -██║ ██╔══██╗██║ ██║████╗ ██║██╔════╝██║ ██║ -██║ ███████║██║ ██║██╔██╗ ██║██║ ███████║ -██║ ██╔══██║██║ ██║██║╚██╗██║██║ ██╔══██║ -███████╗██║ ██║╚██████╔╝██║ ╚████║╚██████╗██║ ██║ -╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚═╝ ╚═╝ -``` - -Moving an ML model from experiment to production requires significant engineering lift. -Scale Launch provides ML engineers a simple Python interface for turning a local code snippet into a production service. -A ML engineer needs to call a few functions from Scale's SDK, which quickly spins up a production-ready service. -The service efficiently utilizes compute resources and automatically scales according to traffic. - -Latest API/SDK reference can be found [here](https://scaleapi.github.io/launch-python-client/). - -## Deploying your model via Scale Launch - -Central to Scale Launch are the notions of a `ModelBundle` and a `ModelEndpoint`. -A `ModelBundle` consists of a trained model as well as the surrounding preprocessing and postprocessing code. -A `ModelEndpoint` is the compute layer that takes in a `ModelBundle`, and is able to carry out inference requests -by using the `ModelBundle` to carry out predictions. The `ModelEndpoint` also knows infrastructure-level details, -such as how many GPUs are needed, what type they are, how much memory, etc. The `ModelEndpoint` automatically handles -infrastructure level details such as autoscaling and task queueing. - -Steps to deploy your model via Scale Launch: - -1. First, you create and upload a `ModelBundle`. - -2. Then, you create a `ModelEndpoint`. - -3. Lastly, you make requests to the `ModelEndpoint`. 
- -TODO: link some example colab notebook - - -## For Developers - -Clone from github and install as editable - -``` -git clone git@github.com:scaleapi/launch-python-client.git -cd launch-python-client -pip3 install poetry -poetry install -``` - -Please install the pre-commit hooks by running the following command: - -```bash -poetry run pre-commit install -``` - -The tests can be run with: - -```bash -poetry run pytest -``` - -### Documentation - -**Updating documentation:** -We use [mkdocs](https://www.mkdocs.org/) to autogenerate our API Reference from docstrings and -markdown files. - -To test your local docstring changes, run the following commands from the repository's root directory: - -``` -poetry shell -mkdocs serve -``` - -The above command will spin up a server on localhost (port 8000 by default) that will watch for and -automatically rebuild a version of the API reference based on your local docstring and markdown -changes. diff --git a/docs/_images/model_bundle.png b/_images/model_bundle.png similarity index 100% rename from docs/_images/model_bundle.png rename to _images/model_bundle.png diff --git a/docs/_images/model_endpoint.png b/_images/model_endpoint.png similarity index 100% rename from docs/_images/model_endpoint.png rename to _images/model_endpoint.png diff --git a/docs/_images/request_lifecycle.png b/_images/request_lifecycle.png similarity index 100% rename from docs/_images/request_lifecycle.png rename to _images/request_lifecycle.png diff --git a/docs/_static/favicon-32x32.png b/_static/favicon-32x32.png similarity index 100% rename from docs/_static/favicon-32x32.png rename to _static/favicon-32x32.png diff --git a/docs/_static/favicon.ico b/_static/favicon.ico similarity index 100% rename from docs/_static/favicon.ico rename to _static/favicon.ico diff --git a/docs/_static/file.png b/_static/file.png similarity index 100% rename from docs/_static/file.png rename to _static/file.png diff --git a/docs/_static/launch-logo.svg 
b/_static/launch-logo.svg similarity index 100% rename from docs/_static/launch-logo.svg rename to _static/launch-logo.svg diff --git a/docs/_static/minus.png b/_static/minus.png similarity index 100% rename from docs/_static/minus.png rename to _static/minus.png diff --git a/docs/_static/plus.png b/_static/plus.png similarity index 100% rename from docs/_static/plus.png rename to _static/plus.png diff --git a/api/client/index.html b/api/client/index.html new file mode 100644 index 00000000..8ca2caed --- /dev/null +++ b/api/client/index.html @@ -0,0 +1,7713 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Launch Client - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Launch Client

+ + +
+ + + + +

+ LaunchClient + + +

+
LaunchClient(api_key: str, endpoint: Optional[str] = None, self_hosted: bool = False, use_path_with_custom_endpoint: bool = False)
+
+ +
+ + +

Scale Launch Python Client.

+ +

Initializes a Scale Launch Client.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
api_key + str + +
+

Your Scale API key

+
+
+ required +
endpoint + Optional[str] + +
+

The Scale Launch Endpoint (this should not need to be changed)

+
+
+ None +
self_hosted + bool + +
+

True iff you are connecting to a self-hosted Scale Launch

+
+
+ False +
use_path_with_custom_endpoint + bool + +
+

True iff you are not using the default Scale Launch endpoint +but your endpoint has path routing (to SCALE_LAUNCH_VX_PATH) set up

+
+
+ False +
+ + + + +
+ + + + + + + + + + +
+ + + + +

+ batch_async_request + + +

+
batch_async_request(*, model_bundle: Union[ModelBundle, str], urls: Optional[List[str]] = None, inputs: Optional[List[Dict[str, Any]]] = None, batch_url_file_location: Optional[str] = None, serialization_format: str = 'JSON', labels: Optional[Dict[str, str]] = None, cpus: Optional[int] = None, memory: Optional[str] = None, gpus: Optional[int] = None, gpu_type: Optional[str] = None, storage: Optional[str] = None, max_workers: Optional[int] = None, per_worker: Optional[int] = None, timeout_seconds: Optional[float] = None) -> Dict[str, Any]
+
+ +
+ +

Sends a batch inference request using a given bundle. Returns a key that can be used to +retrieve the results of inference at a later time.

+

Must have exactly one of urls or inputs passed in.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle + Union[ModelBundle, str] + +
+

The bundle or the name of a the bundle to use for inference.

+
+
+ required +
urls + Optional[List[str]] + +
+

A list of urls, each pointing to a file containing model input. Must be +accessible by Scale Launch, hence urls need to either be public or signedURLs.

+
+
+ None +
inputs + Optional[List[Dict[str, Any]]] + +
+

A list of model inputs, if exists, we will upload the inputs and pass it in +to Launch.

+
+
+ None +
batch_url_file_location + Optional[str] + +
+

In self-hosted mode, the input to the batch job will be +uploaded to this location if provided. Otherwise, one will be determined from +bundle_location_fn()

+
+
+ None +
serialization_format + str + +
+

Serialization format of output, either 'PICKLE' or 'JSON'. +'pickle' corresponds to pickling results + returning

+
+
+ 'JSON' +
labels + Optional[Dict[str, str]] + +
+

An optional dictionary of key/value pairs to associate with this endpoint.

+
+
+ None +
cpus + Optional[int] + +
+

Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater than +or equal to 1.

+
+
+ None +
memory + Optional[str] + +
+

Amount of memory each worker should get, e.g. "4Gi", "512Mi", etc. This must be +a positive amount of memory.

+
+
+ None +
storage + Optional[str] + +
+

Amount of local ephemeral storage each worker should get, e.g. "4Gi", "512Mi", +etc. This must be a positive amount of storage.

+
+
+ None +
gpus + Optional[int] + +
+

Number of gpus each worker should get, e.g. 0, 1, etc.

+
+
+ None +
max_workers + Optional[int] + +
+

The maximum number of workers. Must be greater than or equal to 0, and as +well as greater than or equal to min_workers.

+
+
+ None +
per_worker + Optional[int] + +
+

The maximum number of concurrent requests that an individual worker can +service. Launch automatically scales the number of workers for the endpoint so that +each worker is processing per_worker requests:

+
    +
  • If the average number of concurrent requests per worker is lower than + per_worker, then the number of workers will be reduced.
  • +
  • Otherwise, if the average number of concurrent requests per worker is higher + than per_worker, then the number of workers will be increased to meet the + elevated traffic.
  • +
+
+
+ None +
gpu_type + Optional[str] + +
+

If specifying a non-zero number of gpus, this controls the type of gpu +requested. Here are the supported values:

+
    +
  • nvidia-tesla-t4
  • +
  • nvidia-ampere-a10
  • +
  • nvidia-hopper-h100
  • +
  • nvidia-hopper-h100-1g20g
  • +
  • nvidia-hopper-h100-3g40g
  • +
+
+
+ None +
timeout_seconds + Optional[float] + +
+

The maximum amount of time (in seconds) that the batch job can take. +If not specified, the server defaults to 12 hours. This includes the time required +to build the endpoint and the total time required for all the individual tasks.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Dict[str, Any] + +
+

A dictionary that contains job_id as a key, and the ID as the value.

+
+
+ +
+ +
+ + +
+ + + + +

+ cancel_fine_tune + + +

+
cancel_fine_tune(fine_tune_id: str) -> CancelFineTuneResponse
+
+ +
+ +

Cancel a fine-tune

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
fine_tune_id + str + +
+

ID of the fine-tune

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
CancelFineTuneResponse + CancelFineTuneResponse + +
+

whether the cancellation was successful

+
+
+ +
+ +
+ + +
+ + + + +

+ clone_model_bundle_with_changes + + +

+
clone_model_bundle_with_changes(model_bundle: Union[ModelBundle, str], app_config: Optional[Dict] = None) -> ModelBundle
+
+ +
+ + +
+ Warning +

This method is deprecated. Use +clone_model_bundle_with_changes_v2 instead.

+
+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle + Union[ModelBundle, str] + +
+

The existing bundle or its ID.

+
+
+ required +
app_config + Optional[Dict] + +
+

The new bundle's app config, if not passed in, the new +bundle's app_config will be set to None

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ ModelBundle + +
+

A ModelBundle object

+
+
+ +
+ +
+ + +
+ + + + +

+ clone_model_bundle_with_changes_v2 + + +

+
clone_model_bundle_with_changes_v2(original_model_bundle_id: str, new_app_config: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response
+
+ +
+ +

Clone a model bundle with an optional new app_config.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
original_model_bundle_id + str + +
+

The ID of the model bundle you want to clone.

+
+
+ required +
new_app_config + Optional[Dict[str, Any]] + +
+

A dictionary of new app config values to use for the cloned model.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ CreateModelBundleV2Response + +
+

An object containing the following keys:

+
    +
  • model_bundle_id: The ID of the cloned model bundle.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ completions_stream + + +

+
completions_stream(endpoint_name: str, prompt: str, max_new_tokens: int, temperature: float, stop_sequences: Optional[List[str]] = None, return_token_log_probs: Optional[bool] = False, timeout: float = DEFAULT_LLM_COMPLETIONS_TIMEOUT) -> Iterable[CompletionStreamV1Response]
+
+ +
+ +

Run prompt completion on an LLM endpoint in streaming fashion. Will fail if endpoint does not support streaming.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
endpoint_name + str + +
+

The name of the LLM endpoint to make the request to

+
+
+ required +
prompt + str + +
+

The prompt to send to the endpoint

+
+
+ required +
max_new_tokens + int + +
+

The maximum number of tokens to generate for each prompt

+
+
+ required +
temperature + float + +
+

The temperature to use for sampling

+
+
+ required +
stop_sequences + Optional[List[str]] + +
+

List of sequences to stop the completion at

+
+
+ None +
return_token_log_probs + Optional[bool] + +
+

Whether to return the log probabilities of the tokens

+
+
+ False +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Iterable[CompletionStreamV1Response] + +
+

Iterable responses for prompt completion

+
+
+ +
+ +
+ + +
+ + + + +

+ completions_sync + + +

+
completions_sync(endpoint_name: str, prompt: str, max_new_tokens: int, temperature: float, stop_sequences: Optional[List[str]] = None, return_token_log_probs: Optional[bool] = False, timeout: float = DEFAULT_LLM_COMPLETIONS_TIMEOUT) -> CompletionSyncV1Response
+
+ +
+ +

Run prompt completion on a sync LLM endpoint. Will fail if the endpoint is not sync.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
endpoint_name + str + +
+

The name of the LLM endpoint to make the request to

+
+
+ required +
prompt + str + +
+

The completion prompt to send to the endpoint

+
+
+ required +
max_new_tokens + int + +
+

The maximum number of tokens to generate for each prompt

+
+
+ required +
temperature + float + +
+

The temperature to use for sampling

+
+
+ required +
stop_sequences + Optional[List[str]] + +
+

List of sequences to stop the completion at

+
+
+ None +
return_token_log_probs + Optional[bool] + +
+

Whether to return the log probabilities of the tokens

+
+
+ False +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ CompletionSyncV1Response + +
+

Response for prompt completion

+
+
+ +
+ +
+ + +
+ + + + +

+ create_docker_image_batch_job + + +

+
create_docker_image_batch_job(*, labels: Dict[str, str], docker_image_batch_job_bundle: Optional[Union[str, DockerImageBatchJobBundleResponse]] = None, docker_image_batch_job_bundle_name: Optional[str] = None, job_config: Optional[Dict[str, Any]] = None, cpus: Optional[int] = None, memory: Optional[str] = None, gpus: Optional[int] = None, gpu_type: Optional[str] = None, storage: Optional[str] = None)
+
+ +
+ +

For self hosted mode only. +Parameters: + docker_image_batch_job_bundle: Specifies the docker image bundle to use for the batch job. + Either the string id of a docker image bundle, or a + DockerImageBatchJobBundleResponse object. + Only one of docker_image_batch_job_bundle and docker_image_batch_job_bundle_name + can be specified. + docker_image_batch_job_bundle_name: The name of a batch job bundle. If specified, + Launch will use the most recent bundle with that name owned by the current user. + Only one of docker_image_batch_job_bundle and docker_image_batch_job_bundle_name + can be specified. + labels: Kubernetes labels that are present on the batch job. + job_config: A JSON-serializable python object that will get passed to the batch job, + specifically as the contents of a file mounted at mount_location inside the bundle. + You can call python's json.load() on the file to retrieve the contents. + cpus: Optional override for the number of cpus to give to your job. Either the default + must be specified in the bundle, or this must be specified. + memory: Optional override for the amount of memory to give to your job. Either the default + must be specified in the bundle, or this must be specified. + gpus: Optional number of gpus to give to the bundle. If not specified in the bundle or + here, will be interpreted as 0 gpus. + gpu_type: Optional type of gpu. If the final number of gpus is positive, must be specified + either in the bundle or here. + storage: Optional reserved amount of disk to give to your batch job. If not specified, + your job may be evicted if it is using too much disk.

+ +
+ +
+ + +
+ + + + +

+ create_docker_image_batch_job_bundle + + +

+
create_docker_image_batch_job_bundle(*, name: str, image_repository: str, image_tag: str, command: List[str], env: Optional[Dict[str, str]] = None, mount_location: Optional[str] = None, cpus: Optional[int] = None, memory: Optional[str] = None, gpus: Optional[int] = None, gpu_type: Optional[str] = None, storage: Optional[str] = None) -> CreateDockerImageBatchJobBundleResponse
+
+ +
+ +

For self hosted mode only.

+

Creates a Docker Image Batch Job Bundle.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
name + str + +
+

A user-defined name for the bundle. Does not need to be unique.

+
+
+ required +
image_repository + str + +
+

The (short) repository of your image. For example, if your image is located at +123456789012.dkr.ecr.us-west-2.amazonaws.com/repo:tag, and your version of Launch +is configured to look at 123456789012.dkr.ecr.us-west-2.amazonaws.com for Docker Images, +you would pass the value repo for the image_repository parameter.

+
+
+ required +
image_tag + str + +
+

The tag of your image inside of the repo. In the example above, you would pass +the value tag for the image_tag parameter.

+
+
+ required +
command + List[str] + +
+

The command to run inside the docker image.

+
+
+ required +
env + Optional[Dict[str, str]] + +
+

A dictionary of environment variables to inject into your docker image.

+
+
+ None +
mount_location + Optional[str] + +
+

A location in the filesystem where you would like a json-formatted file, controllable +on runtime, to be mounted. This allows behavior to be specified on runtime. +(Specifically, the contents of this file can be read via json.load() inside of the +user-defined code.)

+
+
+ None +
cpus + Optional[int] + +
+

Optional default value for the number of cpus to give the job.

+
+
+ None +
memory + Optional[str] + +
+

Optional default value for the amount of memory to give the job.

+
+
+ None +
gpus + Optional[int] + +
+

Optional default value for the number of gpus to give the job.

+
+
+ None +
gpu_type + Optional[str] + +
+

Optional default value for the type of gpu to give the job.

+
+
+ None +
storage + Optional[str] + +
+

Optional default value for the amount of disk to give the job.

+
+
+ None +
+ +
+ +
+ + +
+ + + + +

+ create_fine_tune + + +

+
create_fine_tune(model: str, training_file: str, validation_file: Optional[str] = None, fine_tuning_method: Optional[str] = None, hyperparameters: Optional[Dict[str, str]] = None, wandb_config: Optional[Dict[str, Any]] = None, suffix: str = None) -> CreateFineTuneResponse
+
+ +
+ +

Create a fine-tune

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model + str + +
+

Identifier of base model to train from.

+
+
+ required +
training_file + str + +
+

Path to file of training dataset. +Dataset must be a csv with columns 'prompt' and 'response'.

+
+
+ required +
validation_file + Optional[str] + +
+

Path to file of validation dataset. +Has the same format as training_file. If not provided, we will generate a split +from the training dataset.

+
+
+ None +
fine_tuning_method + Optional[str] + +
+

Fine-tuning method. Currently unused, +but when different techniques are implemented we will expose this field.

+
+
+ None +
hyperparameters + Optional[Dict[str, str]] + +
+

Hyperparameters to pass in to training job.

+
+
+ None +
wandb_config + Optional[Dict[str, Any]] + +
+

Configuration for Weights and Biases. +To enable set hyperparameters["report_to"] to wandb. +api_key must be provided which is the API key.

+
+
+ None +
suffix + str + +
+

Optional user-provided identifier suffix for the fine-tuned model.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
CreateFineTuneResponse + CreateFineTuneResponse + +
+

ID of the created fine-tune

+
+
+ +
+ +
+ + +
+ + + + +

+ create_llm_model_endpoint + + +

+
create_llm_model_endpoint(endpoint_name: str, model_name: str, inference_framework_image_tag: str, source: LLMSource = LLMSource.HUGGING_FACE, inference_framework: LLMInferenceFramework = LLMInferenceFramework.DEEPSPEED, num_shards: int = 4, quantize: Optional[Quantization] = None, checkpoint_path: Optional[str] = None, cpus: int = 32, memory: str = '192Gi', storage: Optional[str] = None, gpus: int = 4, min_workers: int = 0, max_workers: int = 1, per_worker: int = 10, gpu_type: Optional[str] = 'nvidia-ampere-a10', endpoint_type: str = 'sync', high_priority: Optional[bool] = False, post_inference_hooks: Optional[List[PostInferenceHooks]] = None, default_callback_url: Optional[str] = None, default_callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, default_callback_auth_username: Optional[str] = None, default_callback_auth_password: Optional[str] = None, default_callback_auth_cert: Optional[str] = None, default_callback_auth_key: Optional[str] = None, public_inference: Optional[bool] = None, update_if_exists: bool = False, labels: Optional[Dict[str, str]] = None)
+
+ +
+ +

Creates and registers a model endpoint in Scale Launch. The returned object is an +instance of type Endpoint, which is a base class of either SyncEndpoint or +AsyncEndpoint. This is the object to which you sent inference requests.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
endpoint_name + str + +
+

The name of the model endpoint you want to create. The name +must be unique across all endpoints that you own.

+
+
+ required +
model_name + str + +
+

name for the LLM. List can be found at +(TODO: add list of supported models)

+
+
+ required +
inference_framework_image_tag + str + +
+

image tag for the inference framework. +(TODO: use latest image tag when unspecified)

+
+
+ required +
source + LLMSource + +
+

source of the LLM. Currently only HuggingFace is supported.

+
+
+ HUGGING_FACE +
inference_framework + LLMInferenceFramework + +
+

inference framework for the LLM. Currently only DeepSpeed is supported.

+
+
+ DEEPSPEED +
num_shards + int + +
+

number of shards for the LLM. When bigger than 1, LLM will be sharded +to multiple GPUs. Number of GPUs must be larger than num_shards.

+
+
+ 4 +
quantize + Optional[Quantization] + +
+

Quantization method for the LLM. Only affects behavior for text-generation-inference models.

+
+
+ None +
checkpoint_path + Optional[str] + +
+

Path to the checkpoint to load the model from. +Only affects behavior for text-generation-inference models.

+
+
+ None +
cpus + int + +
+

Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater +than or equal to 1.

+
+
+ 32 +
memory + str + +
+

Amount of memory each worker should get, e.g. "4Gi", "512Mi", etc. This must +be a positive amount of memory.

+
+
+ '192Gi' +
storage + Optional[str] + +
+

Amount of local ephemeral storage each worker should get, e.g. "4Gi", +"512Mi", etc. This must be a positive amount of storage.

+
+
+ None +
gpus + int + +
+

Number of gpus each worker should get, e.g. 0, 1, etc.

+
+
+ 4 +
min_workers + int + +
+

The minimum number of workers. Must be greater than or equal to 0. This +should be determined by computing the minimum throughput of your workload and +dividing it by the throughput of a single worker. This field must be at least 1 +for synchronous endpoints.

+
+
+ 0 +
max_workers + int + +
+

The maximum number of workers. Must be greater than or equal to 0, +and as well as greater than or equal to min_workers. This should be determined by +computing the maximum throughput of your workload and dividing it by the throughput +of a single worker.

+
+
+ 1 +
per_worker + int + +
+

The maximum number of concurrent requests that an individual worker can +service. Launch automatically scales the number of workers for the endpoint so that +each worker is processing per_worker requests, subject to the limits defined by +min_workers and max_workers.

+
    +
  • If the average number of concurrent requests per worker is lower than +per_worker, then the number of workers will be reduced. - Otherwise, +if the average number of concurrent requests per worker is higher than +per_worker, then the number of workers will be increased to meet the elevated +traffic.
  • +
+

Here is our recommendation for computing per_worker:

+
    +
  1. Compute min_workers and max_workers per your minimum and maximum +throughput requirements. 2. Determine a value for the maximum number of +concurrent requests in the workload. Divide this number by max_workers. Doing +this ensures that the number of workers will "climb" to max_workers.
  2. +
+
+
+ 10 +
gpu_type + Optional[str] + +
+

If specifying a non-zero number of gpus, this controls the type of gpu +requested. Here are the supported values:

+
    +
  • nvidia-tesla-t4
  • +
  • nvidia-ampere-a10
  • +
  • nvidia-hopper-h100
  • +
  • nvidia-hopper-h100-1g20g
  • +
  • nvidia-hopper-h100-3g40g
  • +
+
+
+ 'nvidia-ampere-a10' +
endpoint_type + str + +
+

Either "sync" or "async".

+
+
+ 'sync' +
high_priority + Optional[bool] + +
+

Either True or False. Enabling this will allow the created +endpoint to leverage the shared pool of prewarmed nodes for faster spinup time.

+
+
+ False +
post_inference_hooks + Optional[List[PostInferenceHooks]] + +
+

List of hooks to trigger after inference tasks are served.

+
+
+ None +
default_callback_url + Optional[str] + +
+

The default callback url to use for async endpoints. +This can be overridden in the task parameters for each individual task. +post_inference_hooks must contain "callback" for the callback to be triggered.

+
+
+ None +
default_callback_auth_kind + Optional[Literal['basic', 'mtls']] + +
+

The default callback auth kind to use for async endpoints. +Either "basic" or "mtls". This can be overridden in the task parameters for each +individual task.

+
+
+ None +
default_callback_auth_username + Optional[str] + +
+

The default callback auth username to use. This only +applies if default_callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
default_callback_auth_password + Optional[str] + +
+

The default callback auth password to use. This only +applies if default_callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
default_callback_auth_cert + Optional[str] + +
+

The default callback auth cert to use. This only applies +if default_callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
default_callback_auth_key + Optional[str] + +
+

The default callback auth key to use. This only applies +if default_callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
public_inference + Optional[bool] + +
+

If True, this endpoint will be available to all user IDs for +inference.

+
+
+ None +
update_if_exists + bool + +
+

If True, will attempt to update the endpoint if it exists. +Otherwise, will unconditionally try to create a new endpoint. Note that endpoint +names for a given user must be unique, so attempting to call this function with +update_if_exists=False for an existing endpoint will raise an error.

+
+
+ False +
labels + Optional[Dict[str, str]] + +
+

An optional dictionary of key/value pairs to associate with this endpoint.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ +
+

A Endpoint object that can be used to make requests to the endpoint.

+
+
+ +
+ +
+ + +
+ + + + +

+ create_model_bundle + + +

+
create_model_bundle(model_bundle_name: str, env_params: Dict[str, str], *, load_predict_fn: Optional[Callable[[LaunchModel_T], Callable[[Any], Any]]] = None, predict_fn_or_cls: Optional[Callable[[Any], Any]] = None, requirements: Optional[List[str]] = None, model: Optional[LaunchModel_T] = None, load_model_fn: Optional[Callable[[], LaunchModel_T]] = None, app_config: Optional[Union[Dict[str, Any], str]] = None, globals_copy: Optional[Dict[str, Any]] = None, request_schema: Optional[Type[BaseModel]] = None, response_schema: Optional[Type[BaseModel]] = None) -> ModelBundle
+
+ +
+ + +
+ Warning +

This method is deprecated. Use +create_model_bundle_from_callable_v2 instead.

+
+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle_name + str + +
+

The name of the model bundle you want to create. The name +must be unique across all bundles that you own.

+
+
+ required +
predict_fn_or_cls + Optional[Callable[[Any], Any]] + +
+

Function or a Callable class that runs end-to-end +(pre/post processing and model inference) on the call. i.e. +predict_fn_or_cls(REQUEST) -> RESPONSE.

+
+
+ None +
model + Optional[LaunchModel_T] + +
+

Typically a trained Neural Network, e.g. a Pytorch module.

+

Exactly one of model and load_model_fn must be provided.

+
+
+ None +
load_model_fn + Optional[Callable[[], LaunchModel_T]] + +
+

A function that, when run, loads a model. This function is essentially +a deferred wrapper around the model argument.

+

Exactly one of model and load_model_fn must be provided.

+
+
+ None +
load_predict_fn + Optional[Callable[[LaunchModel_T], Callable[[Any], Any]]] + +
+

Function that, when called with a model, returns a function that +carries out inference.

+

If model is specified, then this is equivalent +to: + load_predict_fn(model, app_config=optional_app_config]) -> predict_fn

+

Otherwise, if load_model_fn is specified, then this is equivalent to: +load_predict_fn(load_model_fn(), app_config=optional_app_config]) -> predict_fn

+

In both cases, predict_fn is then the inference function, i.e.: + predict_fn(REQUEST) -> RESPONSE

+
+
+ None +
requirements + Optional[List[str]] + +
+

A list of python package requirements, where each list element is of +the form <package_name>==<package_version>, e.g.

+

["tensorflow==2.3.0", "tensorflow-hub==0.11.0"]

+

If you do not pass in a value for requirements, then you must pass in +globals() for the globals_copy argument.

+
+
+ None +
app_config + Optional[Union[Dict[str, Any], str]] + +
+

Either a Dictionary that represents a YAML file contents or a local path +to a YAML file.

+
+
+ None +
env_params + Dict[str, str] + +
+

A dictionary that dictates environment information e.g. +the use of pytorch or tensorflow, which base image tag to use, etc. +Specifically, the dictionary should contain the following keys:

+
    +
  • +

    framework_type: either tensorflow or pytorch. - PyTorch fields: - +pytorch_image_tag: An image tag for the pytorch docker base image. The +list of tags can be found from https://hub.docker.com/r/pytorch/pytorch/tags. - +Example:

    +

    .. code-block:: python

    +

    { + "framework_type": "pytorch", + "pytorch_image_tag": "1.10.0-cuda11.3-cudnn8-runtime" + }

    +
  • +
  • +

    Tensorflow fields:

    +
      +
    • tensorflow_version: Version of tensorflow, e.g. "2.3.0".
    • +
    +
  • +
+
+
+ required +
globals_copy + Optional[Dict[str, Any]] + +
+

Dictionary of the global symbol table. Normally provided by +globals() built-in function.

+
+
+ None +
request_schema + Optional[Type[BaseModel]] + +
+

A pydantic model that represents the request schema for the model +bundle. This is used to validate the request body for the model bundle's endpoint.

+
+
+ None +
response_schema + Optional[Type[BaseModel]] + +
+

A pydantic model that represents the response schema for the model +bundle. This is used to validate the response for the model bundle's endpoint. +Note: If request_schema is specified, then response_schema must also be specified.

+
+
+ None +
+ +
+ +
+ + +
+ + + + +

+ create_model_bundle_from_callable_v2 + + +

+
create_model_bundle_from_callable_v2(*, model_bundle_name: str, load_predict_fn: Callable[[LaunchModel_T], Callable[[Any], Any]], load_model_fn: Callable[[], LaunchModel_T], request_schema: Type[BaseModel], response_schema: Type[BaseModel], requirements: Optional[List[str]] = None, pytorch_image_tag: Optional[str] = None, tensorflow_version: Optional[str] = None, custom_base_image_repository: Optional[str] = None, custom_base_image_tag: Optional[str] = None, app_config: Optional[Union[Dict[str, Any], str]] = None, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response
+
+ +
+ +

Uploads and registers a model bundle to Scale Launch.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle_name + str + +
+

Name of the model bundle.

+
+
+ required +
load_predict_fn + Callable[[LaunchModel_T], Callable[[Any], Any]] + +
+

Function that takes in a model and returns a predict function. +When your model bundle is deployed, this predict function will be called as follows: +

input = {"input": "some input"} # or whatever your request schema is.
+
+def load_model_fn():
+    # load model
+    return model
+
+def load_predict_fn(model, app_config=None):
+    def predict_fn(input):
+        # do pre-processing
+        output = model(input)
+        # do post-processing
+        return output
+    return predict_fn
+
+predict_fn = load_predict_fn(load_model_fn(), app_config=optional_app_config)
+response = predict_fn(input)
+

+
+
+ required +
load_model_fn + Callable[[], LaunchModel_T] + +
+

A function that, when run, loads a model.

+
+
+ required +
request_schema + Type[BaseModel] + +
+

A pydantic model that represents the request schema for the model +bundle. This is used to validate the request body for the model bundle's endpoint.

+
+
+ required +
response_schema + Type[BaseModel] + +
+

A pydantic model that represents the response schema for the model +bundle. This is used to validate the response for the model bundle's endpoint.

+
+
+ required +
requirements + Optional[List[str]] + +
+

List of pip requirements.

+
+
+ None +
pytorch_image_tag + Optional[str] + +
+

The image tag for the PyTorch image that will be used to run the +bundle. Exactly one of pytorch_image_tag, tensorflow_version, or +custom_base_image_repository must be specified.

+
+
+ None +
tensorflow_version + Optional[str] + +
+

The version of TensorFlow that will be used to run the bundle. +If not specified, the default version will be used. Exactly one of +pytorch_image_tag, tensorflow_version, or custom_base_image_repository +must be specified.

+
+
+ None +
custom_base_image_repository + Optional[str] + +
+

The repository for a custom base image that will be +used to run the bundle. If not specified, the default base image will be used. +Exactly one of pytorch_image_tag, tensorflow_version, or +custom_base_image_repository must be specified.

+
+
+ None +
custom_base_image_tag + Optional[str] + +
+

The tag for a custom base image that will be used to run the +bundle. Must be specified if custom_base_image_repository is specified.

+
+
+ None +
app_config + Optional[Union[Dict[str, Any], str]] + +
+

An optional dictionary of configuration values that will be passed to the +bundle when it is run. These values can be accessed by the bundle via the +app_config global variable.

+
+
+ None +
metadata + Optional[Dict[str, Any]] + +
+

Metadata to record with the bundle.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ CreateModelBundleV2Response + +
+

An object containing the following keys:

+
    +
  • model_bundle_id: The ID of the created model bundle.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ create_model_bundle_from_dirs + + +

+
create_model_bundle_from_dirs(*, model_bundle_name: str, base_paths: List[str], requirements_path: str, env_params: Dict[str, str], load_predict_fn_module_path: str, load_model_fn_module_path: str, app_config: Optional[Union[Dict[str, Any], str]] = None, request_schema: Optional[Type[BaseModel]] = None, response_schema: Optional[Type[BaseModel]] = None) -> ModelBundle
+
+ +
+ + +
+ Warning +

This method is deprecated. Use +create_model_bundle_from_dirs_v2 +instead.

+
+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle_name + str + +
+

The name of the model bundle you want to create. The name +must be unique across all bundles that you own.

+
+
+ required +
base_paths + List[str] + +
+

The paths on the local filesystem where the bundle code lives.

+
+
+ required +
requirements_path + str + +
+

A path on the local filesystem where a requirements.txt file +lives.

+
+
+ required +
env_params + Dict[str, str] + +
+

A dictionary that dictates environment information e.g. +the use of pytorch or tensorflow, which base image tag to use, etc. +Specifically, the dictionary should contain the following keys:

+
    +
  • framework_type: either tensorflow or pytorch.
  • +
  • PyTorch fields:
      +
    • pytorch_image_tag: An image tag for the pytorch docker base image. The + list of tags can be found from https://hub.docker.com/r/pytorch/pytorch/tags
    • +
    +
  • +
+

Example: +

{
+    "framework_type": "pytorch",
+    "pytorch_image_tag": "1.10.0-cuda11.3-cudnn8-runtime",
+}
+

+
+
+ required +
load_predict_fn_module_path + str + +
+

A python module path for a function that, when called +with the output of load_model_fn_module_path, returns a function that carries out +inference.

+
+
+ required +
load_model_fn_module_path + str + +
+

A python module path for a function that returns a model. +The output feeds into the function located at load_predict_fn_module_path.

+
+
+ required +
app_config + Optional[Union[Dict[str, Any], str]] + +
+

Either a Dictionary that represents a YAML file contents or a local path +to a YAML file.

+
+
+ None +
request_schema + Optional[Type[BaseModel]] + +
+

A pydantic model that represents the request schema for the model +bundle. This is used to validate the request body for the model bundle's endpoint.

+
+
+ None +
response_schema + Optional[Type[BaseModel]] + +
+

A pydantic model that represents the response schema for the model +bundle. This is used to validate the response for the model bundle's endpoint. +Note: If request_schema is specified, then response_schema must also be specified.

+
+
+ None +
+ +
+ +
+ + +
+ + + + +

+ create_model_bundle_from_dirs_v2 + + +

+
create_model_bundle_from_dirs_v2(*, model_bundle_name: str, base_paths: List[str], load_predict_fn_module_path: str, load_model_fn_module_path: str, request_schema: Type[BaseModel], response_schema: Type[BaseModel], requirements_path: Optional[str] = None, pytorch_image_tag: Optional[str] = None, tensorflow_version: Optional[str] = None, custom_base_image_repository: Optional[str] = None, custom_base_image_tag: Optional[str] = None, app_config: Optional[Dict[str, Any]] = None, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response
+
+ +
+ +

Packages up code from one or more local filesystem folders and uploads them as a bundle +to Scale Launch. In this mode, a bundle is just local code instead of a serialized object.

+

For example, if you have a directory structure like so, and your current working +directory is my_root:

+
   my_root/
+       my_module1/
+           __init__.py
+           ...files and directories
+           my_inference_file.py
+       my_module2/
+           __init__.py
+           ...files and directories
+
+

then calling create_model_bundle_from_dirs_v2 with base_paths=["my_module1", +"my_module2"] essentially creates a zip file without the root directory, e.g.:

+
   my_module1/
+       __init__.py
+       ...files and directories
+       my_inference_file.py
+   my_module2/
+       __init__.py
+       ...files and directories
+
+

and these contents will be unzipped relative to the server side application root. Bear +these points in mind when referencing Python module paths for this bundle. For instance, +if my_inference_file.py has def f(...) as the desired inference loading function, +then the load_predict_fn_module_path argument should be my_module1.my_inference_file.f.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle_name + str + +
+

The name of the model bundle you want to create.

+
+
+ required +
base_paths + List[str] + +
+

A list of paths to directories that will be zipped up and uploaded +as a bundle. Each path must be relative to the current working directory.

+
+
+ required +
load_predict_fn_module_path + str + +
+

The Python module path to the function that will be +used to load the model for inference. This function should take in a path to a +model directory, and return a model object. The model object should be pickleable.

+
+
+ required +
load_model_fn_module_path + str + +
+

The Python module path to the function that will be +used to load the model for training. This function should take in a path to a +model directory, and return a model object. The model object should be pickleable.

+
+
+ required +
request_schema + Type[BaseModel] + +
+

A Pydantic model that defines the request schema for the bundle.

+
+
+ required +
response_schema + Type[BaseModel] + +
+

A Pydantic model that defines the response schema for the bundle.

+
+
+ required +
requirements_path + Optional[str] + +
+

Path to a requirements.txt file that will be used to install +dependencies for the bundle. This file must be relative to the current working +directory.

+
+
+ None +
pytorch_image_tag + Optional[str] + +
+

The image tag for the PyTorch image that will be used to run the +bundle. Exactly one of pytorch_image_tag, tensorflow_version, or +custom_base_image_repository must be specified.

+
+
+ None +
tensorflow_version + Optional[str] + +
+

The version of TensorFlow that will be used to run the bundle. +If not specified, the default version will be used. Exactly one of +pytorch_image_tag, tensorflow_version, or custom_base_image_repository +must be specified.

+
+
+ None +
custom_base_image_repository + Optional[str] + +
+

The repository for a custom base image that will be +used to run the bundle. If not specified, the default base image will be used. +Exactly one of pytorch_image_tag, tensorflow_version, or +custom_base_image_repository must be specified.

+
+
+ None +
custom_base_image_tag + Optional[str] + +
+

The tag for a custom base image that will be used to run the +bundle. Must be specified if custom_base_image_repository is specified.

+
+
+ None +
app_config + Optional[Dict[str, Any]] + +
+

An optional dictionary of configuration values that will be passed to the +bundle when it is run. These values can be accessed by the bundle via the +app_config global variable.

+
+
+ None +
metadata + Optional[Dict[str, Any]] + +
+

Metadata to record with the bundle.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ CreateModelBundleV2Response + +
+

An object containing the following keys:

+
    +
  • model_bundle_id: The ID of the created model bundle.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ create_model_bundle_from_runnable_image_v2 + + +

+
create_model_bundle_from_runnable_image_v2(*, model_bundle_name: str, request_schema: Type[BaseModel], response_schema: Type[BaseModel], repository: str, tag: str, command: List[str], healthcheck_route: Optional[str] = None, predict_route: Optional[str] = None, env: Dict[str, str], readiness_initial_delay_seconds: int, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response
+
+ +
+ +

Create a model bundle from a runnable image. The specified command must start a process +that will listen for requests on port 5005 using HTTP.

+

Inference requests must be served at the POST /predict route while the GET /readyz route is a healthcheck.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle_name + str + +
+

The name of the model bundle you want to create.

+
+
+ required +
request_schema + Type[BaseModel] + +
+

A Pydantic model that defines the request schema for the bundle.

+
+
+ required +
response_schema + Type[BaseModel] + +
+

A Pydantic model that defines the response schema for the bundle.

+
+
+ required +
repository + str + +
+

The name of the Docker repository for the runnable image.

+
+
+ required +
tag + str + +
+

The tag for the runnable image.

+
+
+ required +
command + List[str] + +
+

The command that will be used to start the process that listens for requests.

+
+
+ required +
predict_route + Optional[str] + +
+

The endpoint route on the runnable image that will be called.

+
+
+ None +
healthcheck_route + Optional[str] + +
+

The healthcheck endpoint route on the runnable image.

+
+
+ None +
env + Dict[str, str] + +
+

A dictionary of environment variables that will be passed to the bundle when it +is run.

+
+
+ required +
readiness_initial_delay_seconds + int + +
+

The number of seconds to wait for the HTTP server to become ready and +successfully respond on its healthcheck.

+
+
+ required +
metadata + Optional[Dict[str, Any]] + +
+

Metadata to record with the bundle.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ CreateModelBundleV2Response + +
+

An object containing the following keys:

+
    +
  • model_bundle_id: The ID of the created model bundle.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ create_model_bundle_from_streaming_enhanced_runnable_image_v2 + + +

+
create_model_bundle_from_streaming_enhanced_runnable_image_v2(*, model_bundle_name: str, request_schema: Type[BaseModel], response_schema: Type[BaseModel], repository: str, tag: str, command: Optional[List[str]] = None, healthcheck_route: Optional[str] = None, predict_route: Optional[str] = None, streaming_command: List[str], streaming_predict_route: Optional[str] = None, env: Dict[str, str], readiness_initial_delay_seconds: int, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response
+
+ +
+ +

Create a model bundle from a runnable image. The specified command must start a process +that will listen for requests on port 5005 using HTTP.

+

Inference requests must be served at the POST /predict route while the GET /readyz route is a healthcheck.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle_name + str + +
+

The name of the model bundle you want to create.

+
+
+ required +
request_schema + Type[BaseModel] + +
+

A Pydantic model that defines the request schema for the bundle.

+
+
+ required +
response_schema + Type[BaseModel] + +
+

A Pydantic model that defines the response schema for the bundle.

+
+
+ required +
repository + str + +
+

The name of the Docker repository for the runnable image.

+
+
+ required +
tag + str + +
+

The tag for the runnable image.

+
+
+ required +
command + Optional[List[str]] + +
+

The command that will be used to start the process that listens for requests if +this bundle is used as a SYNC or ASYNC endpoint.

+
+
+ None +
healthcheck_route + Optional[str] + +
+

The healthcheck endpoint route on the runnable image.

+
+
+ None +
predict_route + Optional[str] + +
+

The endpoint route on the runnable image that will be called if this bundle is used as a SYNC +or ASYNC endpoint.

+
+
+ None +
streaming_command + List[str] + +
+

The command that will be used to start the process that listens for +requests if this bundle is used as a STREAMING endpoint.

+
+
+ required +
streaming_predict_route + Optional[str] + +
+

The endpoint route on the runnable image that will be called if this bundle is used +as a STREAMING endpoint.

+
+
+ None +
env + Dict[str, str] + +
+

A dictionary of environment variables that will be passed to the bundle when it +is run.

+
+
+ required +
readiness_initial_delay_seconds + int + +
+

The number of seconds to wait for the HTTP server to become ready and +successfully respond on its healthcheck.

+
+
+ required +
metadata + Optional[Dict[str, Any]] + +
+

Metadata to record with the bundle.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ CreateModelBundleV2Response + +
+

An object containing the following keys:

+
    +
  • model_bundle_id: The ID of the created model bundle.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ create_model_bundle_from_triton_enhanced_runnable_image_v2 + + +

+
create_model_bundle_from_triton_enhanced_runnable_image_v2(*, model_bundle_name: str, request_schema: Type[BaseModel], response_schema: Type[BaseModel], repository: str, tag: str, command: List[str], healthcheck_route: Optional[str] = None, predict_route: Optional[str] = None, env: Dict[str, str], readiness_initial_delay_seconds: int, triton_model_repository: str, triton_model_replicas: Optional[Dict[str, str]] = None, triton_num_cpu: float, triton_commit_tag: str, triton_storage: Optional[str] = None, triton_memory: Optional[str] = None, triton_readiness_initial_delay_seconds: int, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response
+
+ +
+ +

Create a model bundle from a runnable image and a tritonserver image.

+

Same requirements as :param:create_model_bundle_from_runnable_image_v2 with additional constraints necessary +for configuring tritonserver's execution.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle_name + str + +
+

The name of the model bundle you want to create.

+
+
+ required +
request_schema + Type[BaseModel] + +
+

A Pydantic model that defines the request schema for the bundle.

+
+
+ required +
response_schema + Type[BaseModel] + +
+

A Pydantic model that defines the response schema for the bundle.

+
+
+ required +
repository + str + +
+

The name of the Docker repository for the runnable image.

+
+
+ required +
tag + str + +
+

The tag for the runnable image.

+
+
+ required +
command + List[str] + +
+

The command that will be used to start the process that listens for requests.

+
+
+ required +
predict_route + Optional[str] + +
+

The endpoint route on the runnable image that will be called.

+
+
+ None +
healthcheck_route + Optional[str] + +
+

The healthcheck endpoint route on the runnable image.

+
+
+ None +
env + Dict[str, str] + +
+

A dictionary of environment variables that will be passed to the bundle when it +is run.

+
+
+ required +
readiness_initial_delay_seconds + int + +
+

The number of seconds to wait for the HTTP server to +become ready and successfully respond on its healthcheck.

+
+
+ required +
triton_model_repository + str + +
+

The S3 prefix that contains the contents of the model +repository, formatted according to +https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_repository.md

+
+
+ required +
triton_model_replicas + Optional[Dict[str, str]] + +
+

If supplied, the name and number of replicas to make for each +model.

+
+
+ None +
triton_num_cpu + float + +
+

Number of CPUs, fractional, to allocate to tritonserver.

+
+
+ required +
triton_commit_tag + str + +
+

The image tag of the specific tritonserver version.

+
+
+ required +
triton_storage + Optional[str] + +
+

Amount of storage space to allocate for the tritonserver container.

+
+
+ None +
triton_memory + Optional[str] + +
+

Amount of memory to allocate for the tritonserver container.

+
+
+ None +
triton_readiness_initial_delay_seconds + int + +
+

Like readiness_initial_delay_seconds, but for +tritonserver's own healthcheck.

+
+
+ required +
metadata + Optional[Dict[str, Any]] + +
+

Metadata to record with the bundle.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ CreateModelBundleV2Response + +
+

An object containing the following keys:

+
    +
  • model_bundle_id: The ID of the created model bundle.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ create_model_endpoint + + +

+
create_model_endpoint(*, endpoint_name: str, model_bundle: Union[ModelBundle, str], cpus: int = 3, memory: str = '8Gi', storage: str = '16Gi', gpus: int = 0, min_workers: int = 1, max_workers: int = 1, per_worker: int = 10, gpu_type: Optional[str] = None, endpoint_type: str = 'sync', high_priority: Optional[bool] = False, post_inference_hooks: Optional[List[PostInferenceHooks]] = None, default_callback_url: Optional[str] = None, default_callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, default_callback_auth_username: Optional[str] = None, default_callback_auth_password: Optional[str] = None, default_callback_auth_cert: Optional[str] = None, default_callback_auth_key: Optional[str] = None, public_inference: Optional[bool] = None, update_if_exists: bool = False, labels: Optional[Dict[str, str]] = None) -> Optional[Endpoint]
+
+ +
+ +

Creates and registers a model endpoint in Scale Launch. The returned object is an +instance of type Endpoint, which is a base class of either SyncEndpoint or +AsyncEndpoint. This is the object to which you send inference requests.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
endpoint_name + str + +
+

The name of the model endpoint you want to create. The name +must be unique across all endpoints that you own.

+
+
+ required +
model_bundle + Union[ModelBundle, str] + +
+

The ModelBundle that the endpoint should serve.

+
+
+ required +
cpus + int + +
+

Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater +than or equal to 1.

+
+
+ 3 +
memory + str + +
+

Amount of memory each worker should get, e.g. "4Gi", "512Mi", etc. This must +be a positive amount of memory.

+
+
+ '8Gi' +
storage + str + +
+

Amount of local ephemeral storage each worker should get, e.g. "4Gi", +"512Mi", etc. This must be a positive amount of storage.

+
+
+ '16Gi' +
gpus + int + +
+

Number of gpus each worker should get, e.g. 0, 1, etc.

+
+
+ 0 +
min_workers + int + +
+

The minimum number of workers. Must be greater than or equal to 0. This +should be determined by computing the minimum throughput of your workload and +dividing it by the throughput of a single worker. This field must be at least 1 +for synchronous endpoints.

+
+
+ 1 +
max_workers + int + +
+

The maximum number of workers. Must be greater than or equal to 0, +and as well as greater than or equal to min_workers. This should be determined by +computing the maximum throughput of your workload and dividing it by the throughput +of a single worker.

+
+
+ 1 +
per_worker + int + +
+

The maximum number of concurrent requests that an individual worker can +service. Launch automatically scales the number of workers for the endpoint so that +each worker is processing per_worker requests, subject to the limits defined by +min_workers and max_workers.

+
    +
  • If the average number of concurrent requests per worker is lower than +per_worker, then the number of workers will be reduced. - Otherwise, +if the average number of concurrent requests per worker is higher than +per_worker, then the number of workers will be increased to meet the elevated +traffic.
  • +
+

Here is our recommendation for computing per_worker:

+
    +
  1. Compute min_workers and max_workers per your minimum and maximum +throughput requirements. 2. Determine a value for the maximum number of +concurrent requests in the workload. Divide this number by max_workers. Doing +this ensures that the number of workers will "climb" to max_workers.
  2. +
+
+
+ 10 +
gpu_type + Optional[str] + +
+

If specifying a non-zero number of gpus, this controls the type of gpu +requested. Here are the supported values:

+
    +
  • nvidia-tesla-t4
  • +
  • nvidia-ampere-a10
  • +
  • nvidia-hopper-h100
  • +
  • nvidia-hopper-h100-1g20g
  • +
  • nvidia-hopper-h100-3g40g
  • +
+
+
+ None +
endpoint_type + str + +
+

Either "sync", "async", or "streaming".

+
+
+ 'sync' +
high_priority + Optional[bool] + +
+

Either True or False. Enabling this will allow the created +endpoint to leverage the shared pool of prewarmed nodes for faster spinup time.

+
+
+ False +
post_inference_hooks + Optional[List[PostInferenceHooks]] + +
+

List of hooks to trigger after inference tasks are served.

+
+
+ None +
default_callback_url + Optional[str] + +
+

The default callback url to use for async endpoints. +This can be overridden in the task parameters for each individual task. +post_inference_hooks must contain "callback" for the callback to be triggered.

+
+
+ None +
default_callback_auth_kind + Optional[Literal['basic', 'mtls']] + +
+

The default callback auth kind to use for async endpoints. +Either "basic" or "mtls". This can be overridden in the task parameters for each +individual task.

+
+
+ None +
default_callback_auth_username + Optional[str] + +
+

The default callback auth username to use. This only +applies if default_callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
default_callback_auth_password + Optional[str] + +
+

The default callback auth password to use. This only +applies if default_callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
default_callback_auth_cert + Optional[str] + +
+

The default callback auth cert to use. This only applies +if default_callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
default_callback_auth_key + Optional[str] + +
+

The default callback auth key to use. This only applies +if default_callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
public_inference + Optional[bool] + +
+

If True, this endpoint will be available to all user IDs for +inference.

+
+
+ None +
update_if_exists + bool + +
+

If True, will attempt to update the endpoint if it exists. +Otherwise, will unconditionally try to create a new endpoint. Note that endpoint +names for a given user must be unique, so attempting to call this function with +update_if_exists=False for an existing endpoint will raise an error.

+
+
+ False +
labels + Optional[Dict[str, str]] + +
+

An optional dictionary of key/value pairs to associate with this endpoint.

+
+
+ None +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Optional[Endpoint] + +
+

A Endpoint object that can be used to make requests to the endpoint.

+
+
+ +
+ +
+ + +
+ + + + +

+ delete_file + + +

+
delete_file(file_id: str) -> DeleteFileResponse
+
+ +
+ +

Delete a file

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file_id + str + +
+

ID of the file

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
DeleteFileResponse + DeleteFileResponse + +
+

whether the deletion was successful

+
+
+ +
+ +
+ + +
+ + + + +

+ delete_llm_model_endpoint + + +

+
delete_llm_model_endpoint(model_endpoint_name: str) -> bool
+
+ +
+ +

Deletes an LLM model endpoint.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_endpoint_name + str + +
+

The name of the model endpoint to delete.

+
+
+ required +
+ +
+ +
+ + +
+ + + + +

+ delete_model_endpoint + + +

+
delete_model_endpoint(model_endpoint_name: str)
+
+ +
+ +

Deletes a model endpoint.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_endpoint + +
+

A ModelEndpoint object.

+
+
+ required +
+ +
+ +
+ + +
+ + + + +

+ edit_model_endpoint + + +

+
edit_model_endpoint(*, model_endpoint: Union[ModelEndpoint, str], model_bundle: Optional[Union[ModelBundle, str]] = None, cpus: Optional[float] = None, memory: Optional[str] = None, storage: Optional[str] = None, gpus: Optional[int] = None, min_workers: Optional[int] = None, max_workers: Optional[int] = None, per_worker: Optional[int] = None, gpu_type: Optional[str] = None, high_priority: Optional[bool] = None, post_inference_hooks: Optional[List[PostInferenceHooks]] = None, default_callback_url: Optional[str] = None, default_callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, default_callback_auth_username: Optional[str] = None, default_callback_auth_password: Optional[str] = None, default_callback_auth_cert: Optional[str] = None, default_callback_auth_key: Optional[str] = None, public_inference: Optional[bool] = None) -> None
+
+ +
+ +

Edits an existing model endpoint. Here are the fields that cannot be edited on an +existing endpoint:

+
    +
  • The endpoint's name. - The endpoint's type (i.e. you cannot go from a SyncEndpoint +to an AsyncEndpoint or vice versa).
  • +
+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_endpoint + Union[ModelEndpoint, str] + +
+

The model endpoint (or its name) you want to edit. The name +must be unique across all endpoints that you own.

+
+
+ required +
model_bundle + Optional[Union[ModelBundle, str]] + +
+

The ModelBundle that the endpoint should serve.

+
+
+ None +
cpus + Optional[float] + +
+

Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater +than or equal to 1.

+
+
+ None +
memory + Optional[str] + +
+

Amount of memory each worker should get, e.g. "4Gi", "512Mi", etc. This must +be a positive amount of memory.

+
+
+ None +
storage + Optional[str] + +
+

Amount of local ephemeral storage each worker should get, e.g. "4Gi", +"512Mi", etc. This must be a positive amount of storage.

+
+
+ None +
gpus + Optional[int] + +
+

Number of gpus each worker should get, e.g. 0, 1, etc.

+
+
+ None +
min_workers + Optional[int] + +
+

The minimum number of workers. Must be greater than or equal to 0.

+
+
+ None +
max_workers + Optional[int] + +
+

The maximum number of workers. Must be greater than or equal to 0, +and as well as greater than or equal to min_workers.

+
+
+ None +
per_worker + Optional[int] + +
+

The maximum number of concurrent requests that an individual worker can +service. Launch automatically scales the number of workers for the endpoint so that +each worker is processing per_worker requests:

+
    +
  • If the average number of concurrent requests per worker is lower than +per_worker, then the number of workers will be reduced. - Otherwise, +if the average number of concurrent requests per worker is higher than +per_worker, then the number of workers will be increased to meet the elevated +traffic.
  • +
+
+
+ None +
gpu_type + Optional[str] + +
+

If specifying a non-zero number of gpus, this controls the type of gpu +requested. Here are the supported values:

+
    +
  • nvidia-tesla-t4
  • +
  • nvidia-ampere-a10
  • +
  • nvidia-hopper-h100
  • +
  • nvidia-hopper-h100-1g20g
  • +
  • nvidia-hopper-h100-3g40g
  • +
+
+
+ None +
high_priority + Optional[bool] + +
+

Either True or False. Enabling this will allow the created +endpoint to leverage the shared pool of prewarmed nodes for faster spinup time.

+
+
+ None +
post_inference_hooks + Optional[List[PostInferenceHooks]] + +
+

List of hooks to trigger after inference tasks are served.

+
+
+ None +
default_callback_url + Optional[str] + +
+

The default callback url to use for async endpoints. +This can be overridden in the task parameters for each individual task. +post_inference_hooks must contain "callback" for the callback to be triggered.

+
+
+ None +
default_callback_auth_kind + Optional[Literal['basic', 'mtls']] + +
+

The default callback auth kind to use for async endpoints. +Either "basic" or "mtls". This can be overridden in the task parameters for each +individual task.

+
+
+ None +
default_callback_auth_username + Optional[str] + +
+

The default callback auth username to use. This only +applies if default_callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
default_callback_auth_password + Optional[str] + +
+

The default callback auth password to use. This only +applies if default_callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
default_callback_auth_cert + Optional[str] + +
+

The default callback auth cert to use. This only applies +if default_callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
default_callback_auth_key + Optional[str] + +
+

The default callback auth key to use. This only applies +if default_callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
public_inference + Optional[bool] + +
+

If True, this endpoint will be available to all user IDs for +inference.

+
+
+ None +
+ +
+ +
+ + +
+ + + + +

+ get_batch_async_response + + +

+
get_batch_async_response(batch_job_id: str) -> Dict[str, Any]
+
+ +
+ +

Gets inference results from a previously created batch job.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
batch_job_id + str + +
+

An id representing the batch task job. This id is in the response from +calling batch_async_request.

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TypeDescription
+ Dict[str, Any] + +
+

A dictionary that contains the following fields:

+
+
+ Dict[str, Any] + +
+
    +
  • status: The status of the job.
  • +
+
+
+ Dict[str, Any] + +
+
    +
  • result: The url where the result is stored.
  • +
+
+
+ Dict[str, Any] + +
+
    +
  • duration: A string representation of how long the job took to finish + or how long it has been running, for a job currently in progress.
  • +
+
+
+ Dict[str, Any] + +
+
    +
  • num_tasks_pending: The number of tasks that are still pending.
  • +
+
+
+ Dict[str, Any] + +
+
    +
  • num_tasks_completed: The number of tasks that have completed.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ get_docker_image_batch_job + + +

+
get_docker_image_batch_job(batch_job_id: str)
+
+ +
+ +

For self hosted mode only. Gets information about a batch job given a batch job id.

+ +
+ +
+ + +
+ + + + +

+ get_docker_image_batch_job_bundle + + +

+
get_docker_image_batch_job_bundle(docker_image_batch_job_bundle_id: str) -> DockerImageBatchJobBundleResponse
+
+ +
+ +

For self hosted mode only. Gets information for a single batch job bundle with a given id.

+ +
+ +
+ + +
+ + + + +

+ get_file + + +

+
get_file(file_id: str) -> GetFileResponse
+
+ +
+ +

Get metadata about a file

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file_id + str + +
+

ID of the file

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
GetFileResponse + GetFileResponse + +
+

ID, filename, and size of the requested file

+
+
+ +
+ +
+ + +
+ + + + +

+ get_file_content + + +

+
get_file_content(file_id: str) -> GetFileContentResponse
+
+ +
+ +

Get a file's content

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file_id + str + +
+

ID of the file

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
GetFileContentResponse + GetFileContentResponse + +
+

ID and content of the requested file

+
+
+ +
+ +
+ + +
+ + + + +

+ get_fine_tune + + +

+
get_fine_tune(fine_tune_id: str) -> GetFineTuneResponse
+
+ +
+ +

Get status of a fine-tune

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
fine_tune_id + str + +
+

ID of the fine-tune

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
GetFineTuneResponse + GetFineTuneResponse + +
+

ID and status of the requested fine-tune

+
+
+ +
+ +
+ + +
+ + + + +

+ get_fine_tune_events + + +

+
get_fine_tune_events(fine_tune_id: str) -> GetFineTuneEventsResponse
+
+ +
+ +

Get list of fine-tune events

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
fine_tune_id + str + +
+

ID of the fine-tune

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
GetFineTuneEventsResponse + GetFineTuneEventsResponse + +
+

a list of all the events of the fine-tune

+
+
+ +
+ +
+ + +
+ + + + +

+ get_latest_docker_image_batch_job_bundle + + +

+
get_latest_docker_image_batch_job_bundle(bundle_name: str) -> DockerImageBatchJobBundleResponse
+
+ +
+ +

For self hosted mode only. Gets information for the latest batch job bundle with a given name.

+ +
+ +
+ + +
+ + + + +

+ get_latest_model_bundle_v2 + + +

+
get_latest_model_bundle_v2(model_bundle_name: str) -> ModelBundleV2Response
+
+ +
+ +

Get the latest version of a model bundle.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle_name + str + +
+

The name of the model bundle you want to get.

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ ModelBundleV2Response + +
+

An object containing the following keys:

+
    +
  • id: The ID of the model bundle.
  • +
  • name: The name of the model bundle.
  • +
  • schema_location: The location of the schema for the model bundle.
  • +
  • flavor: The flavor of the model bundle. Either RunnableImage, + CloudpickleArtifact, ZipArtifact, or TritonEnhancedRunnableImageFlavor.
  • +
  • created_at: The time the model bundle was created.
  • +
  • metadata: A dictionary of metadata associated with the model bundle.
  • +
  • model_artifact_ids: A list of IDs of model artifacts associated with the + bundle.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ get_llm_model_endpoint + + +

+
get_llm_model_endpoint(endpoint_name: str) -> Optional[Union[AsyncEndpoint, SyncEndpoint, StreamingEndpoint]]
+
+ +
+ +

Gets a model endpoint associated with a name that the user has access to.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
endpoint_name + str + +
+

The name of the endpoint to retrieve.

+
+
+ required +
+ +
+ +
+ + +
+ + + + +

+ get_model_bundle + + +

+
get_model_bundle(model_bundle: Union[ModelBundle, str]) -> ModelBundle
+
+ +
+ +

Returns a model bundle specified by bundle_name that the user owns.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle + Union[ModelBundle, str] + +
+

The bundle or its name.

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ ModelBundle + +
+

A ModelBundle object

+
+
+ +
+ +
+ + +
+ + + + +

+ get_model_bundle_v2 + + +

+
get_model_bundle_v2(model_bundle_id: str) -> ModelBundleV2Response
+
+ +
+ +

Get a model bundle.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_bundle_id + str + +
+

The ID of the model bundle you want to get.

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ ModelBundleV2Response + +
+

An object containing the following fields:

+
    +
  • id: The ID of the model bundle.
  • +
  • name: The name of the model bundle.
  • +
  • flavor: The flavor of the model bundle. Either RunnableImage, + CloudpickleArtifact, ZipArtifact, or TritonEnhancedRunnableImageFlavor.
  • +
  • created_at: The time the model bundle was created.
  • +
  • metadata: A dictionary of metadata associated with the model bundle.
  • +
  • model_artifact_ids: A list of IDs of model artifacts associated with the + bundle.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ get_model_endpoint + + +

+
get_model_endpoint(endpoint_name: str) -> Optional[Union[AsyncEndpoint, SyncEndpoint]]
+
+ +
+ +

Gets a model endpoint associated with a name.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
endpoint_name + str + +
+

The name of the endpoint to retrieve.

+
+
+ required +
+ +
+ +
+ + +
+ + + + +

+ list_docker_image_batch_job_bundles + + +

+
list_docker_image_batch_job_bundles(bundle_name: Optional[str] = None, order_by: Optional[Literal['newest', 'oldest']] = None) -> ListDockerImageBatchJobBundleResponse
+
+ +
+ +

For self hosted mode only. Gets information for multiple bundles.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
bundle_name + Optional[str] + +
+

The name of the bundles to retrieve. If not specified, this will retrieve all

+
+
+ None +
order_by + Optional[Literal['newest', 'oldest']] + +
+

Either "newest", "oldest", or not specified. Specify to sort by newest/oldest.

+
+
+ None +
+ +
+ +
+ + +
+ + + + +

+ list_files + + +

+
list_files() -> ListFilesResponse
+
+ +
+ +

List files

+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
ListFilesResponse + ListFilesResponse + +
+

list of all files (ID, filename, and size)

+
+
+ +
+ +
+ + +
+ + + + +

+ list_fine_tunes + + +

+
list_fine_tunes() -> ListFineTunesResponse
+
+ +
+ +

List fine-tunes

+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
ListFineTunesResponse + ListFineTunesResponse + +
+

list of all fine-tunes and their statuses

+
+
+ +
+ +
+ + +
+ + + + +

+ list_llm_model_endpoints + + +

+
list_llm_model_endpoints() -> List[Endpoint]
+
+ +
+ +

Lists all LLM model endpoints that the user has access to.

+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ List[Endpoint] + +
+

A list of ModelEndpoint objects.

+
+
+ +
+ +
+ + +
+ + + + +

+ list_model_bundles + + +

+
list_model_bundles() -> List[ModelBundle]
+
+ +
+ +

Returns a list of model bundles that the user owns.

+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ List[ModelBundle] + +
+

A list of ModelBundle objects

+
+
+ +
+ +
+ + +
+ + + + +

+ list_model_bundles_v2 + + +

+
list_model_bundles_v2() -> ListModelBundlesV2Response
+
+ +
+ +

List all model bundles.

+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ ListModelBundlesV2Response + +
+

An object containing the following keys:

+
    +
  • model_bundles: A list of model bundles. Each model bundle is an object.
  • +
+
+
+ +
+ +
+ + +
+ + + + +

+ list_model_endpoints + + +

+
list_model_endpoints() -> List[Endpoint]
+
+ +
+ +

Lists all model endpoints that the user owns.

+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ List[Endpoint] + +
+

A list of ModelEndpoint objects.

+
+
+ +
+ +
+ + +
+ + + + +

+ model_download + + +

+
model_download(model_name: str, download_format: str = 'hugging_face') -> ModelDownloadResponse
+
+ +
+ +

Download a fine-tuned model

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_name + str + +
+

name of the model to download

+
+
+ required +
download_format + str + +
+

format of the model to download

+
+
+ 'hugging_face' +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
ModelDownloadResponse + ModelDownloadResponse + +
+

dictionary with file names and urls to download the model

+
+
+ +
+ +
+ + +
+ + + + +

+ read_endpoint_creation_logs + + +

+
read_endpoint_creation_logs(model_endpoint: Union[ModelEndpoint, str])
+
+ +
+ +

Retrieves the logs for the creation of the endpoint.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_endpoint + Union[ModelEndpoint, str] + +
+

The endpoint or its name.

+
+
+ required +
+ +
+ +
+ + +
+ + + + +

+ register_batch_csv_location_fn + + +

+
register_batch_csv_location_fn(batch_csv_location_fn: Callable[[], str])
+
+ +
+ +

For self-hosted mode only. Registers a function that gives a location for batch CSV +inputs. Should give different locations each time. This function is called as +batch_csv_location_fn(), and should return a batch_csv_url that upload_batch_csv_fn can +take.

+

Strictly, batch_csv_location_fn() does not need to return a str. The only requirement is +that if batch_csv_location_fn returns a value of type T, then upload_batch_csv_fn() takes +in an object of type T as its second argument (i.e. batch_csv_url).

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
batch_csv_location_fn + Callable[[], str] + +
+

Function that generates batch_csv_urls for upload_batch_csv_fn.

+
+
+ required +
+ +
+ +
+ + +
+ + + + +

+ register_bundle_location_fn + + +

+
register_bundle_location_fn(bundle_location_fn: Callable[[], str])
+
+ +
+ +

For self-hosted mode only. Registers a function that gives a location for a model bundle. +Should give different locations each time. This function is called as +bundle_location_fn(), and should return a bundle_url that +register_upload_bundle_fn can take.

+

Strictly, bundle_location_fn() does not need to return a str. The only +requirement is that if bundle_location_fn returns a value of type T, +then upload_bundle_fn() takes in an object of type T as its second argument (i.e. +bundle_url).

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
bundle_location_fn + Callable[[], str] + +
+

Function that generates bundle_urls for upload_bundle_fn.

+
+
+ required +
+ +
+ +
+ + +
+ + + + +

+ register_upload_batch_csv_fn + + +

+
register_upload_batch_csv_fn(upload_batch_csv_fn: Callable[[str, str], None])
+
+ +
+ +

For self-hosted mode only. Registers a function that handles batch text upload. This +function is called as

+
upload_batch_csv_fn(csv_text, csv_url)
+
+

This function should directly write the contents of csv_text as a text string into +csv_url.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
upload_batch_csv_fn + Callable[[str, str], None] + +
+

Function that takes in a csv text (string type), +and uploads that bundle to an appropriate location. Only needed for self-hosted mode.

+
+
+ required +
+ +
+ +
+ + +
+ + + + +

+ register_upload_bundle_fn + + +

+
register_upload_bundle_fn(upload_bundle_fn: Callable[[str, str], None])
+
+ +
+ +

For self-hosted mode only. Registers a function that handles model bundle upload. This +function is called as

+
upload_bundle_fn(serialized_bundle, bundle_url)
+
+

This function should directly write the contents of serialized_bundle as a +binary string into bundle_url.

+

See register_bundle_location_fn for more notes on the signature of upload_bundle_fn

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
upload_bundle_fn + Callable[[str, str], None] + +
+

Function that takes in a serialized bundle (bytes type), +and uploads that bundle to an appropriate location. Only needed for self-hosted mode.

+
+
+ required +
+ +
+ +
+ + +
+ + + + +

+ update_docker_image_batch_job + + +

+
update_docker_image_batch_job(batch_job_id: str, cancel: bool)
+
+ +
+ +

For self hosted mode only. Updates a batch job by id. +Use this if you want to cancel/delete a batch job.

+ +
+ +
+ + +
+ + + + +

+ upload_file + + +

+
upload_file(file_path: str) -> UploadFileResponse
+
+ +
+ +

Upload a file

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file_path + str + +
+

Path to a local file to upload.

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
UploadFileResponse + UploadFileResponse + +
+

ID of the created file

+
+
+ +
+ +
+ + + +
+ +
+ +
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/api/endpoint_predictions/index.html b/api/endpoint_predictions/index.html new file mode 100644 index 00000000..479b350c --- /dev/null +++ b/api/endpoint_predictions/index.html @@ -0,0 +1,1393 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Endpoint Predictions - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Endpoint Predictions

+ + +
+ + + + +

+ EndpointRequest + + +

+
EndpointRequest(url: Optional[str] = None, args: Optional[Dict] = None, callback_url: Optional[str] = None, callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, callback_auth_username: Optional[str] = None, callback_auth_password: Optional[str] = None, callback_auth_cert: Optional[str] = None, callback_auth_key: Optional[str] = None, return_pickled: Optional[bool] = False, request_id: Optional[str] = None)
+
+ +
+ + +

Represents a single request to either a SyncEndpoint, StreamingEndpoint, or AsyncEndpoint.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
url + Optional[str] + +
+

A url to some file that can be read in to a ModelBundle's predict function. Can be an image, raw text, etc. +Note: the contents of the file located at url are opened as a sequence of bytes and passed +to the predict function. If you instead want to pass the url itself as an input to the predict function, +see args.

+

Exactly one of url and args must be specified.

+
+
+ None +
args + Optional[Dict] + +
+

A Dictionary with arguments to a ModelBundle's predict function. If the predict function has signature +predict_fn(foo, bar), then the keys in the dictionary should be "foo" and "bar". +Values must be native Python objects.

+

Exactly one of url and args must be specified.

+
+
+ None +
return_pickled + Optional[bool] + +
+

Whether the output should be a pickled python object, or directly returned serialized json.

+
+
+ False +
callback_url + Optional[str] + +
+

The callback url to use for this task. If None, then the +default_callback_url of the endpoint is used. The endpoint must specify +"callback" as a post-inference hook for the callback to be triggered.

+
+
+ None +
callback_auth_kind + Optional[Literal['basic', 'mtls']] + +
+

The default callback auth kind to use for async endpoints. +Either "basic" or "mtls". This can be overridden in the task parameters for each +individual task.

+
+
+ None +
callback_auth_username + Optional[str] + +
+

The default callback auth username to use. This only +applies if callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
callback_auth_password + Optional[str] + +
+

The default callback auth password to use. This only +applies if callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
callback_auth_cert + Optional[str] + +
+

The default callback auth cert to use. This only applies +if callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
callback_auth_key + Optional[str] + +
+

The default callback auth key to use. This only applies +if callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
request_id + Optional[str] + +
+

(deprecated) A user-specifiable id for requests. +Should be unique among EndpointRequests made in the same batch call. +If one isn't provided the client will generate its own.

+
+
+ None +
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + + +

+ EndpointResponse + + +

+
EndpointResponse(client, status: str, result_url: Optional[str] = None, result: Optional[str] = None, traceback: Optional[str] = None)
+
+ +
+ + +

Represents a response received from a Endpoint.

+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
client + +
+

An instance of LaunchClient.

+
+
+ required +
status + str + +
+

A string representing the status of the request, i.e. SUCCESS, FAILURE, or PENDING

+
+
+ required +
result_url + Optional[str] + +
+

A string that is a url containing the pickled python object from the +Endpoint's predict function.

+

Exactly one of result_url or result will be populated, +depending on the value of return_pickled in the request.

+
+
+ None +
result + Optional[str] + +
+

A string that is the serialized return value (in json form) of the Endpoint's predict function. +Specifically, one can json.loads() the value of result to get the original python object back.

+

Exactly one of result_url or result will be populated, +depending on the value of return_pickled in the request.

+
+
+ None +
traceback + Optional[str] + +
+

The stack trace if the inference endpoint raised an error. Can be used for debugging

+
+
+ None +
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + + +

+ EndpointResponseFuture + + +

+
EndpointResponseFuture(client, endpoint_name: str, async_task_id: str)
+
+ +
+ + +

Represents a future response from an Endpoint. Specifically, when the EndpointResponseFuture is ready, +then its get method will return an actual instance of EndpointResponse.

+

This object should not be directly instantiated by the user.

+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
client + +
+

An instance of LaunchClient.

+
+
+ required +
endpoint_name + str + +
+

The name of the endpoint.

+
+
+ required +
async_task_id + str + +
+

An async task id.

+
+
+ required +
+ + + + +
+ + + + + + + + + + +
+ + + + +

+ get + + +

+
get(timeout: Optional[float] = None) -> EndpointResponse
+
+ +
+ +

Retrieves the EndpointResponse for the prediction request after it completes. This method blocks.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
timeout + Optional[float] + +
+

The maximum number of seconds to wait for the response. If None, then +the method will block indefinitely until the response is ready.

+
+
+ None +
+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + + +

+ EndpointResponseStream + + +

+
EndpointResponseStream(response)
+
+ +
+

+ Bases: Iterator

+ + +

Represents a stream response from an Endpoint. This object is iterable and yields +EndpointResponse objects.

+

This object should not be directly instantiated by the user.

+ + + + +
+ + + + + + + + + + +
+ + + + +

+ __iter__ + + +

+
__iter__()
+
+ +
+ +

Uses server-sent events to iterate through the stream.

+ +
+ +
+ + +
+ + + + +

+ __next__ + + +

+
__next__()
+
+ +
+ +

Uses server-sent events to iterate through the stream.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/api/hooks/index.html b/api/hooks/index.html new file mode 100644 index 00000000..11800188 --- /dev/null +++ b/api/hooks/index.html @@ -0,0 +1,758 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Hooks - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Hooks

+ +
+ + + + +

+ PostInferenceHooks + + +

+ + +
+

+ Bases: str, Enum

+ + +

Post-inference hooks are functions that are called after inference is complete.

+ + + +

Attributes:

+ + + + + + + + + + + + + + + +
NameTypeDescription
CALLBACK + str + +
+

The callback hook is called with the inference response and the task ID.

+
+
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/api/llms/index.html b/api/llms/index.html new file mode 100644 index 00000000..b6eba259 --- /dev/null +++ b/api/llms/index.html @@ -0,0 +1,735 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + LLM APIs - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

LLM APIs

+

We provide some APIs to conveniently create, list and inference with LLMs. Under the hood they are Launch model endpoints.

+

Example

+
LLM APIs Usage
import os
+
+from rich import print
+
+from launch import LaunchClient
+from launch.api_client.model.llm_inference_framework import (
+    LLMInferenceFramework,
+)
+from launch.api_client.model.llm_source import LLMSource
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"), endpoint=os.getenv("LAUNCH_ENDPOINT"))
+
+endpoints = client.list_llm_model_endpoints()
+
+print(endpoints)
+
+endpoint_name = "test-flan-t5-xxl"
+client.create_llm_model_endpoint(
+    endpoint_name=endpoint_name,
+    model_name="flan-t5-xxl",
+    source=LLMSource.HUGGING_FACE,
+    inference_framework=LLMInferenceFramework.DEEPSPEED,
+    inference_framework_image_tag=os.getenv("INFERENCE_FRAMEWORK_IMAGE_TAG"),
+    num_shards=4,
+    min_workers=1,
+    max_workers=1,
+    gpus=4,
+    endpoint_type="sync",
+)
+
+# Wait for the endpoint to be ready
+
+output = client.completions_sync(endpoint_name, prompt="What is Deep Learning?", max_new_tokens=10, temperature=0)
+print(output)
+
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/api/model_bundles/index.html b/api/model_bundles/index.html new file mode 100644 index 00000000..c58b8561 --- /dev/null +++ b/api/model_bundles/index.html @@ -0,0 +1,2485 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Model Bundles - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Model Bundles

+ + +
+ + + + +

+ CloudpickleArtifactFlavor + + +

+ + +
+

+ Bases: BaseModel

+ + + + + +
+ + + + + + + +
+ + + + +

+ app_config + + + + instance-attribute + + +

+
app_config: Optional[Dict[str, Any]]
+
+ +
+ +

Optional configuration for the application.

+
+ +
+ +
+ + + + +

+ framework + + + + class-attribute + instance-attribute + + +

+
framework: Union[PytorchFramework, TensorflowFramework, CustomFramework] = Field(..., discriminator='framework_type')
+
+ +
+ +

Machine Learning framework specification. Either +PytorchFramework, +TensorflowFramework, or +CustomFramework.

+
+ +
+ +
+ + + + +

+ load_model_fn + + + + instance-attribute + + +

+
load_model_fn: str
+
+ +
+ +

Function which, when called, returns the model object.

+
+ +
+ +
+ + + + +

+ load_predict_fn + + + + instance-attribute + + +

+
load_predict_fn: str
+
+ +
+ +

Function which, when called, returns the prediction function.

+
+ +
+ +
+ + + + +

+ requirements + + + + instance-attribute + + +

+
requirements: List[str]
+
+ +
+ +

List of requirements to install in the environment before running the model.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + + +

+ CreateModelBundleV2Response + + +

+ + +
+

+ Bases: BaseModel

+ + +

Response object for creating a Model Bundle.

+ + + + +
+ + + + + + + +
+ + + + +

+ model_bundle_id + + + + instance-attribute + + +

+
model_bundle_id: str
+
+ +
+ +

ID of the Model Bundle.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + + +

+ CustomFramework + + +

+ + +
+

+ Bases: BaseModel

+ + + + + +
+ + + + + + + +
+ + + + +

+ image_repository + + + + instance-attribute + + +

+
image_repository: str
+
+ +
+ +

Docker image repository to use as the base image.

+
+ +
+ +
+ + + + +

+ image_tag + + + + instance-attribute + + +

+
image_tag: str
+
+ +
+ +

Docker image tag to use as the base image.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + + +

+ ListModelBundlesV2Response + + +

+ + +
+

+ Bases: BaseModel

+ + +

Response object for listing Model Bundles.

+ + + + +
+ + + + + + + +
+ + + + +

+ model_bundles + + + + instance-attribute + + +

+
model_bundles: List[ModelBundleV2Response]
+
+ +
+ +

A list of Model Bundles.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + + +

+ ModelBundle + + + + dataclass + + +

+ + +
+ + +

Represents a ModelBundle.

+ + + + +
+ + + + + + + +
+ + + + +

+ app_config + + + + class-attribute + instance-attribute + + +

+
app_config: Optional[Dict[Any, Any]] = None
+
+ +
+ +

An optional user-specified configuration mapping for the bundle.

+
+ +
+ +
+ + + + +

+ env_params + + + + class-attribute + instance-attribute + + +

+
env_params: Optional[Dict[str, str]] = None
+
+ +
+ +

A dictionary that dictates environment information. See LaunchClient.create_model_bundle +for more information.

+
+ +
+ +
+ + + + +

+ id + + + + class-attribute + instance-attribute + + +

+
id: Optional[str] = None
+
+ +
+ +

A globally unique identifier for the bundle.

+
+ +
+ +
+ + + + +

+ location + + + + class-attribute + instance-attribute + + +

+
location: Optional[str] = None
+
+ +
+ +

An opaque location for the bundle.

+
+ +
+ +
+ + + + +

+ metadata + + + + class-attribute + instance-attribute + + +

+
metadata: Optional[Dict[Any, Any]] = None
+
+ +
+ +

Arbitrary metadata for the bundle.

+
+ +
+ +
+ + + + +

+ name + + + + instance-attribute + + +

+
name: str
+
+ +
+ +

The name of the bundle. Must be unique across all bundles that the user owns.

+
+ +
+ +
+ + + + +

+ packaging_type + + + + class-attribute + instance-attribute + + +

+
packaging_type: Optional[str] = None
+
+ +
+ +

The packaging type for the bundle. Can be cloudpickle or zip.

+
+ +
+ +
+ + + + +

+ requirements + + + + class-attribute + instance-attribute + + +

+
requirements: Optional[List[str]] = None
+
+ +
+ +

A list of Python package requirements for the bundle. See LaunchClient.create_model_bundle +for more information.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + + +

+ ModelBundleV2Response + + +

+ + +
+

+ Bases: BaseModel

+ + +

Response object for a single Model Bundle.

+ + + + +
+ + + + + + + +
+ + + + +

+ created_at + + + + instance-attribute + + +

+
created_at: datetime.datetime
+
+ +
+ +

Timestamp of when the Model Bundle was created.

+
+ +
+ +
+ + + + +

+ flavor + + + + class-attribute + instance-attribute + + +

+
flavor: ModelBundleFlavors = Field(..., discriminator='flavor')
+
+ +
+ +

Flavor of the Model Bundle, representing how the model bundle was packaged.

+

See ModelBundleFlavors for details.

+
+ +
+ +
+ + + + +

+ id + + + + instance-attribute + + +

+
id: str
+
+ +
+ +

ID of the Model Bundle.

+
+ +
+ +
+ + + + +

+ metadata + + + + instance-attribute + + +

+
metadata: Dict[str, Any]
+
+ +
+ +

Metadata associated with the Model Bundle.

+
+ +
+ +
+ + + + +

+ model_artifact_ids + + + + instance-attribute + + +

+
model_artifact_ids: List[str]
+
+ +
+ +

IDs of the Model Artifacts associated with the Model Bundle.

+
+ +
+ +
+ + + + +

+ name + + + + instance-attribute + + +

+
name: str
+
+ +
+ +

Name of the Model Bundle.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + + +

+ PytorchFramework + + +

+ + +
+

+ Bases: BaseModel

+ + + + + +
+ + + + + + + +
+ + + + +

+ pytorch_image_tag + + + + instance-attribute + + +

+
pytorch_image_tag: str
+
+ +
+ +

Image tag of the Pytorch image to use.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + + +

+ RunnableImageFlavor + + +

+ + +
+

+ Bases: RunnableImageLike

+ + +

Model bundles that use custom docker images that expose an HTTP server for inference.

+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + + +

+ TensorflowFramework + + +

+ + +
+

+ Bases: BaseModel

+ + + + + +
+ + + + + + + +
+ + + + +

+ tensorflow_version + + + + instance-attribute + + +

+
tensorflow_version: str
+
+ +
+ +

Tensorflow version to use.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + + +

+ ZipArtifactFlavor + + +

+ + +
+

+ Bases: BaseModel

+ + + + + +
+ + + + + + + +
+ + + + +

+ app_config + + + + class-attribute + instance-attribute + + +

+
app_config: Optional[Dict[str, Any]] = None
+
+ +
+ +

Optional configuration for the application.

+
+ +
+ +
+ + + + +

+ framework + + + + class-attribute + instance-attribute + + +

+
framework: Union[PytorchFramework, TensorflowFramework, CustomFramework] = Field(..., discriminator='framework_type')
+
+ +
+ +

Machine Learning framework specification. Either +PytorchFramework, +TensorflowFramework, or +CustomFramework.

+
+ +
+ +
+ + + + +

+ load_model_fn_module_path + + + + instance-attribute + + +

+
load_model_fn_module_path: str
+
+ +
+ +

Path to the module to load the model object.

+
+ +
+ +
+ + + + +

+ load_predict_fn_module_path + + + + instance-attribute + + +

+
load_predict_fn_module_path: str
+
+ +
+ +

Path to the module to load the prediction function.

+
+ +
+ +
+ + + + +

+ requirements + + + + instance-attribute + + +

+
requirements: List[str]
+
+ +
+ +

List of requirements to install in the environment before running the model.

+
+ +
+ + + + + +
+ +
+ +
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/api/model_endpoints/index.html b/api/model_endpoints/index.html new file mode 100644 index 00000000..5277237f --- /dev/null +++ b/api/model_endpoints/index.html @@ -0,0 +1,1381 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Model Endpoints - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Model Endpoints

+

All classes here are returned by the +get_model_endpoint +method and provide a predict function.

+ + +
+ + + + +

+ AsyncEndpoint + + +

+
AsyncEndpoint(model_endpoint: ModelEndpoint, client: ModelEndpoint)
+
+ +
+

+ Bases: Endpoint

+ + +

An asynchronous model endpoint.

+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_endpoint + ModelEndpoint + +
+

ModelEndpoint object.

+
+
+ required +
client + +
+

A LaunchClient object

+
+
+ required +
+ + + + +
+ + + + + + + + + + +
+ + + + +

+ predict + + +

+
predict(request: EndpointRequest) -> EndpointResponseFuture
+
+ +
+ +

Runs an asynchronous prediction request.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
request + EndpointRequest + +
+

The EndpointRequest object that contains the payload.

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + + + + + + + + + +
Name TypeDescription
+ EndpointResponseFuture + +
+

An EndpointResponseFuture such the user can use to query the status of the request.

+
+
Example + EndpointResponseFuture + +
+ +
+
+ EndpointResponseFuture + +
+

.. code-block:: python

+

my_endpoint = AsyncEndpoint(...) +f: EndpointResponseFuture = my_endpoint.predict(EndpointRequest(...)) +result = f.get() # blocks on completion

+
+
+ +
+ +
+ + +
+ + + + +

+ predict_batch + + +

+
predict_batch(requests: Sequence[EndpointRequest]) -> AsyncEndpointBatchResponse
+
+ +
+ +

(deprecated) +Runs inference on the data items specified by urls. Returns a AsyncEndpointResponse.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
requests + Sequence[EndpointRequest] + +
+

List of EndpointRequests. Request_ids must all be distinct.

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ AsyncEndpointBatchResponse + +
+

an AsyncEndpointResponse keeping track of the inference requests made

+
+
+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + + +

+ SyncEndpoint + + +

+
SyncEndpoint(model_endpoint: ModelEndpoint, client: ModelEndpoint)
+
+ +
+

+ Bases: Endpoint

+ + +

A synchronous model endpoint.

+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_endpoint + ModelEndpoint + +
+

ModelEndpoint object.

+
+
+ required +
client + +
+

A LaunchClient object

+
+
+ required +
+ + + + +
+ + + + + + + + + + +
+ + + + +

+ predict + + +

+
predict(request: EndpointRequest) -> EndpointResponse
+
+ +
+ +

Runs a synchronous prediction request.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
request + EndpointRequest + +
+

The EndpointRequest object that contains the payload.

+
+
+ required +
+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + + +

+ StreamingEndpoint + + +

+
StreamingEndpoint(model_endpoint: ModelEndpoint, client: ModelEndpoint)
+
+ +
+

+ Bases: Endpoint

+ + +

A synchronous model endpoint.

+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model_endpoint + ModelEndpoint + +
+

ModelEndpoint object.

+
+
+ required +
client + +
+

A LaunchClient object

+
+
+ required +
+ + + + +
+ + + + + + + + + + +
+ + + + +

+ predict + + +

+
predict(request: EndpointRequest) -> EndpointResponseStream
+
+ +
+ +

Runs a streaming prediction request.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
request + EndpointRequest + +
+

The EndpointRequest object that contains the payload.

+
+
+ required +
+ + + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ EndpointResponseStream + +
+

An EndpointResponseStream object that can be used to iterate through the stream.

+
+
+ +
+ +
+ + + +
+ +
+ +
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/assets/_mkdocstrings.css b/assets/_mkdocstrings.css new file mode 100644 index 00000000..049a254b --- /dev/null +++ b/assets/_mkdocstrings.css @@ -0,0 +1,64 @@ + +/* Avoid breaking parameter names, etc. in table cells. */ +.doc-contents td code { + word-break: normal !important; +} + +/* No line break before first paragraph of descriptions. */ +.doc-md-description, +.doc-md-description>p:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Keep headings consistent. */ +h1.doc-heading, +h2.doc-heading, +h3.doc-heading, +h4.doc-heading, +h5.doc-heading, +h6.doc-heading { + font-weight: 400; + line-height: 1.5; + color: inherit; + text-transform: none; +} + +h1.doc-heading { + font-size: 1.6rem; +} + +h2.doc-heading { + font-size: 1.2rem; +} + +h3.doc-heading { + font-size: 1.15rem; +} + +h4.doc-heading { + font-size: 1.10rem; +} + +h5.doc-heading { + font-size: 1.05rem; +} + +h6.doc-heading { + font-size: 1rem; +} \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.220ee61c.min.js b/assets/javascripts/bundle.220ee61c.min.js new file mode 100644 index 00000000..116072a1 --- /dev/null +++ b/assets/javascripts/bundle.220ee61c.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Ci=Object.create;var gr=Object.defineProperty;var Ri=Object.getOwnPropertyDescriptor;var ki=Object.getOwnPropertyNames,Ht=Object.getOwnPropertySymbols,Hi=Object.getPrototypeOf,yr=Object.prototype.hasOwnProperty,nn=Object.prototype.propertyIsEnumerable;var rn=(e,t,r)=>t in 
e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))yr.call(t,r)&&rn(e,r,t[r]);if(Ht)for(var r of Ht(t))nn.call(t,r)&&rn(e,r,t[r]);return e};var on=(e,t)=>{var r={};for(var n in e)yr.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&Ht)for(var n of Ht(e))t.indexOf(n)<0&&nn.call(e,n)&&(r[n]=e[n]);return r};var Pt=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Pi=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of ki(t))!yr.call(e,o)&&o!==r&&gr(e,o,{get:()=>t[o],enumerable:!(n=Ri(t,o))||n.enumerable});return e};var yt=(e,t,r)=>(r=e!=null?Ci(Hi(e)):{},Pi(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var sn=Pt((xr,an)=>{(function(e,t){typeof xr=="object"&&typeof an!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(xr,function(){"use strict";function e(r){var n=!0,o=!1,i=null,s={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function a(O){return!!(O&&O!==document&&O.nodeName!=="HTML"&&O.nodeName!=="BODY"&&"classList"in O&&"contains"in O.classList)}function f(O){var Qe=O.type,De=O.tagName;return!!(De==="INPUT"&&s[Qe]&&!O.readOnly||De==="TEXTAREA"&&!O.readOnly||O.isContentEditable)}function c(O){O.classList.contains("focus-visible")||(O.classList.add("focus-visible"),O.setAttribute("data-focus-visible-added",""))}function u(O){O.hasAttribute("data-focus-visible-added")&&(O.classList.remove("focus-visible"),O.removeAttribute("data-focus-visible-added"))}function p(O){O.metaKey||O.altKey||O.ctrlKey||(a(r.activeElement)&&c(r.activeElement),n=!0)}function m(O){n=!1}function d(O){a(O.target)&&(n||f(O.target))&&c(O.target)}function h(O){a(O.target)&&(O.target.classList.contains("focus-visible")||O.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(O.target))}function 
v(O){document.visibilityState==="hidden"&&(o&&(n=!0),Y())}function Y(){document.addEventListener("mousemove",N),document.addEventListener("mousedown",N),document.addEventListener("mouseup",N),document.addEventListener("pointermove",N),document.addEventListener("pointerdown",N),document.addEventListener("pointerup",N),document.addEventListener("touchmove",N),document.addEventListener("touchstart",N),document.addEventListener("touchend",N)}function B(){document.removeEventListener("mousemove",N),document.removeEventListener("mousedown",N),document.removeEventListener("mouseup",N),document.removeEventListener("pointermove",N),document.removeEventListener("pointerdown",N),document.removeEventListener("pointerup",N),document.removeEventListener("touchmove",N),document.removeEventListener("touchstart",N),document.removeEventListener("touchend",N)}function N(O){O.target.nodeName&&O.target.nodeName.toLowerCase()==="html"||(n=!1,B())}document.addEventListener("keydown",p,!0),document.addEventListener("mousedown",m,!0),document.addEventListener("pointerdown",m,!0),document.addEventListener("touchstart",m,!0),document.addEventListener("visibilitychange",v,!0),Y(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var cn=Pt(Er=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(c){return!1}},r=t(),n=function(c){var u={next:function(){var p=c.shift();return{done:p===void 
0,value:p}}};return r&&(u[Symbol.iterator]=function(){return u}),u},o=function(c){return encodeURIComponent(c).replace(/%20/g,"+")},i=function(c){return decodeURIComponent(String(c).replace(/\+/g," "))},s=function(){var c=function(p){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var m=typeof p;if(m!=="undefined")if(m==="string")p!==""&&this._fromString(p);else if(p instanceof c){var d=this;p.forEach(function(B,N){d.append(N,B)})}else if(p!==null&&m==="object")if(Object.prototype.toString.call(p)==="[object Array]")for(var h=0;hd[0]?1:0}),c._entries&&(c._entries={});for(var p=0;p1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(f,c){typeof f!="string"&&(f=String(f)),c&&typeof c!="string"&&(c=String(c));var u=document,p;if(c&&(e.location===void 0||c!==e.location.href)){c=c.toLowerCase(),u=document.implementation.createHTMLDocument(""),p=u.createElement("base"),p.href=c,u.head.appendChild(p);try{if(p.href.indexOf(c)!==0)throw new Error(p.href)}catch(O){throw new Error("URL unable to set base "+c+" due to "+O)}}var m=u.createElement("a");m.href=f,p&&(u.body.appendChild(m),m.href=m.href);var d=u.createElement("input");if(d.type="url",d.value=f,m.protocol===":"||!/:/.test(m.href)||!d.checkValidity()&&!c)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:m});var h=new e.URLSearchParams(this.search),v=!0,Y=!0,B=this;["append","delete","set"].forEach(function(O){var Qe=h[O];h[O]=function(){Qe.apply(h,arguments),v&&(Y=!1,B.search=h.toString(),Y=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var N=void 
0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==N&&(N=this.search,Y&&(v=!1,this.searchParams._fromString(this.search),v=!0))}})},s=i.prototype,a=function(f){Object.defineProperty(s,f,{get:function(){return this._anchorElement[f]},set:function(c){this._anchorElement[f]=c},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(f){a(f)}),Object.defineProperty(s,"search",{get:function(){return this._anchorElement.search},set:function(f){this._anchorElement.search=f,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(s,{toString:{get:function(){var f=this;return function(){return f.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(f){this._anchorElement.href=f,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(f){this._anchorElement.pathname=f},enumerable:!0},origin:{get:function(){var f={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],c=this._anchorElement.port!=f&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(c?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(f){},enumerable:!0},username:{get:function(){return""},set:function(f){},enumerable:!0}}),i.createObjectURL=function(f){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(f){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er)});var 
qr=Pt((Mt,Nr)=>{/*! + * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Mt=="object"&&typeof Nr=="object"?Nr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Mt=="object"?Mt.ClipboardJS=r():t.ClipboardJS=r()})(Mt,function(){return function(){var e={686:function(n,o,i){"use strict";i.d(o,{default:function(){return Ai}});var s=i(279),a=i.n(s),f=i(370),c=i.n(f),u=i(817),p=i.n(u);function m(j){try{return document.execCommand(j)}catch(T){return!1}}var d=function(T){var E=p()(T);return m("cut"),E},h=d;function v(j){var T=document.documentElement.getAttribute("dir")==="rtl",E=document.createElement("textarea");E.style.fontSize="12pt",E.style.border="0",E.style.padding="0",E.style.margin="0",E.style.position="absolute",E.style[T?"right":"left"]="-9999px";var H=window.pageYOffset||document.documentElement.scrollTop;return E.style.top="".concat(H,"px"),E.setAttribute("readonly",""),E.value=j,E}var Y=function(T,E){var H=v(T);E.container.appendChild(H);var I=p()(H);return m("copy"),H.remove(),I},B=function(T){var E=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},H="";return typeof T=="string"?H=Y(T,E):T instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(T==null?void 0:T.type)?H=Y(T.value,E):(H=p()(T),m("copy")),H},N=B;function O(j){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?O=function(E){return typeof E}:O=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},O(j)}var Qe=function(){var T=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},E=T.action,H=E===void 0?"copy":E,I=T.container,q=T.target,Me=T.text;if(H!=="copy"&&H!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(q!==void 0)if(q&&O(q)==="object"&&q.nodeType===1){if(H==="copy"&&q.hasAttribute("disabled"))throw new Error('Invalid 
"target" attribute. Please use "readonly" instead of "disabled" attribute');if(H==="cut"&&(q.hasAttribute("readonly")||q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Me)return N(Me,{container:I});if(q)return H==="cut"?h(q):N(q,{container:I})},De=Qe;function $e(j){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?$e=function(E){return typeof E}:$e=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},$e(j)}function Ei(j,T){if(!(j instanceof T))throw new TypeError("Cannot call a class as a function")}function tn(j,T){for(var E=0;E0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof I.action=="function"?I.action:this.defaultAction,this.target=typeof I.target=="function"?I.target:this.defaultTarget,this.text=typeof I.text=="function"?I.text:this.defaultText,this.container=$e(I.container)==="object"?I.container:document.body}},{key:"listenClick",value:function(I){var q=this;this.listener=c()(I,"click",function(Me){return q.onClick(Me)})}},{key:"onClick",value:function(I){var q=I.delegateTarget||I.currentTarget,Me=this.action(q)||"copy",kt=De({action:Me,container:this.container,target:this.target(q),text:this.text(q)});this.emit(kt?"success":"error",{action:Me,text:kt,trigger:q,clearSelection:function(){q&&q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(I){return vr("action",I)}},{key:"defaultTarget",value:function(I){var q=vr("target",I);if(q)return document.querySelector(q)}},{key:"defaultText",value:function(I){return vr("text",I)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(I){var q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return 
N(I,q)}},{key:"cut",value:function(I){return h(I)}},{key:"isSupported",value:function(){var I=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],q=typeof I=="string"?[I]:I,Me=!!document.queryCommandSupported;return q.forEach(function(kt){Me=Me&&!!document.queryCommandSupported(kt)}),Me}}]),E}(a()),Ai=Li},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function s(a,f){for(;a&&a.nodeType!==o;){if(typeof a.matches=="function"&&a.matches(f))return a;a=a.parentNode}}n.exports=s},438:function(n,o,i){var s=i(828);function a(u,p,m,d,h){var v=c.apply(this,arguments);return u.addEventListener(m,v,h),{destroy:function(){u.removeEventListener(m,v,h)}}}function f(u,p,m,d,h){return typeof u.addEventListener=="function"?a.apply(null,arguments):typeof m=="function"?a.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(v){return a(v,p,m,d,h)}))}function c(u,p,m,d){return function(h){h.delegateTarget=s(h.target,p),h.delegateTarget&&d.call(u,h)}}n.exports=f},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var s=Object.prototype.toString.call(i);return i!==void 0&&(s==="[object NodeList]"||s==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var s=Object.prototype.toString.call(i);return s==="[object Function]"}},370:function(n,o,i){var s=i(879),a=i(438);function f(m,d,h){if(!m&&!d&&!h)throw new Error("Missing required arguments");if(!s.string(d))throw new TypeError("Second argument must be a String");if(!s.fn(h))throw new TypeError("Third argument must be a Function");if(s.node(m))return c(m,d,h);if(s.nodeList(m))return 
u(m,d,h);if(s.string(m))return p(m,d,h);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(m,d,h){return m.addEventListener(d,h),{destroy:function(){m.removeEventListener(d,h)}}}function u(m,d,h){return Array.prototype.forEach.call(m,function(v){v.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(m,function(v){v.removeEventListener(d,h)})}}}function p(m,d,h){return a(document.body,m,d,h)}n.exports=f},817:function(n){function o(i){var s;if(i.nodeName==="SELECT")i.focus(),s=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var a=i.hasAttribute("readonly");a||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),a||i.removeAttribute("readonly"),s=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var f=window.getSelection(),c=document.createRange();c.selectNodeContents(i),f.removeAllRanges(),f.addRange(c),s=f.toString()}return s}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,s,a){var f=this.e||(this.e={});return(f[i]||(f[i]=[])).push({fn:s,ctx:a}),this},once:function(i,s,a){var f=this;function c(){f.off(i,c),s.apply(a,arguments)}return c._=s,this.on(i,c,a)},emit:function(i){var s=[].slice.call(arguments,1),a=((this.e||(this.e={}))[i]||[]).slice(),f=0,c=a.length;for(f;f{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var rs=/["'&<>]/;Yo.exports=ns;function ns(e){var t=""+e,r=rs.exec(t);if(!r)return t;var n,o="",i=0,s=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function W(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var n=r.call(e),o,i=[],s;try{for(;(t===void 0||t-- >0)&&!(o=n.next()).done;)i.push(o.value)}catch(a){s={error:a}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(s)throw s.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var n=0,o=t.length,i;n1||a(m,d)})})}function a(m,d){try{f(n[m](d))}catch(h){p(i[0][3],h)}}function f(m){m.value instanceof et?Promise.resolve(m.value.v).then(c,u):p(i[0][2],m)}function c(m){a("next",m)}function u(m){a("throw",m)}function p(m,d){m(d),i.shift(),i.length&&a(i[0][0],i[0][1])}}function pn(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof Ee=="function"?Ee(e):e[Symbol.iterator](),r={},n("next"),n("throw"),n("return"),r[Symbol.asyncIterator]=function(){return this},r);function n(i){r[i]=e[i]&&function(s){return new Promise(function(a,f){s=e[i](s),o(a,f,s.done,s.value)})}}function o(i,s,a,f){Promise.resolve(f).then(function(c){i({value:c,done:a})},s)}}function C(e){return typeof e=="function"}function at(e){var t=function(n){Error.call(n),n.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var It=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(n,o){return o+1+") "+n.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function Ve(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ie=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,n,o,i;if(!this.closed){this.closed=!0;var s=this._parentage;if(s)if(this._parentage=null,Array.isArray(s))try{for(var a=Ee(s),f=a.next();!f.done;f=a.next()){var c=f.value;c.remove(this)}}catch(v){t={error:v}}finally{try{f&&!f.done&&(r=a.return)&&r.call(a)}finally{if(t)throw t.error}}else s.remove(this);var u=this.initialTeardown;if(C(u))try{u()}catch(v){i=v instanceof It?v.errors:[v]}var p=this._finalizers;if(p){this._finalizers=null;try{for(var m=Ee(p),d=m.next();!d.done;d=m.next()){var h=d.value;try{ln(h)}catch(v){i=i!=null?i:[],v instanceof It?i=D(D([],W(i)),W(v.errors)):i.push(v)}}}catch(v){n={error:v}}finally{try{d&&!d.done&&(o=m.return)&&o.call(m)}finally{if(n)throw n.error}}}if(i)throw new It(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)ln(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Ve(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Ve(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Sr=Ie.EMPTY;function jt(e){return e instanceof Ie||e&&"closed"in e&&C(e.remove)&&C(e.add)&&C(e.unsubscribe)}function ln(e){C(e)?e():e.unsubscribe()}var Le={onUnhandledError:null,onStoppedNotification:null,Promise:void 
0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],n=2;n0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=this,i=o.hasError,s=o.isStopped,a=o.observers;return i||s?Sr:(this.currentObservers=null,a.push(r),new Ie(function(){n.currentObservers=null,Ve(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,s=n.isStopped;o?r.error(i):s&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,n){return new xn(r,n)},t}(F);var xn=function(e){ie(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 0:n.subscribe(r))!==null&&o!==void 0?o:Sr},t}(x);var Et={now:function(){return(Et.delegate||Date).now()},delegate:void 0};var wt=function(e){ie(t,e);function t(r,n,o){r===void 0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var 
n=this,o=n.isStopped,i=n._buffer,s=n._infiniteTimeWindow,a=n._timestampProvider,f=n._windowTime;o||(i.push(r),!s&&i.push(a.now()+f)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,s=o._buffer,a=s.slice(),f=0;f0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var s=r.actions;n!=null&&((i=s[s.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Wt);var Sn=function(e){ie(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Dt);var Oe=new Sn(wn);var M=new F(function(e){return e.complete()});function Vt(e){return e&&C(e.schedule)}function Cr(e){return e[e.length-1]}function Ye(e){return C(Cr(e))?e.pop():void 0}function Te(e){return Vt(Cr(e))?e.pop():void 0}function zt(e,t){return typeof Cr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Nt(e){return C(e==null?void 0:e.then)}function qt(e){return C(e[ft])}function Kt(e){return Symbol.asyncIterator&&C(e==null?void 0:e[Symbol.asyncIterator])}function Qt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function zi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Yt=zi();function Gt(e){return C(e==null?void 0:e[Yt])}function Bt(e){return un(this,arguments,function(){var r,n,o,i;return $t(this,function(s){switch(s.label){case 0:r=e.getReader(),s.label=1;case 1:s.trys.push([1,,9,10]),s.label=2;case 2:return[4,et(r.read())];case 3:return n=s.sent(),o=n.value,i=n.done,i?[4,et(void 0)]:[3,5];case 4:return[2,s.sent()];case 5:return[4,et(o)];case 6:return[4,s.sent()];case 7:return s.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Jt(e){return C(e==null?void 0:e.getReader)}function U(e){if(e instanceof F)return e;if(e!=null){if(qt(e))return Ni(e);if(pt(e))return qi(e);if(Nt(e))return Ki(e);if(Kt(e))return On(e);if(Gt(e))return Qi(e);if(Jt(e))return Yi(e)}throw Qt(e)}function Ni(e){return new F(function(t){var r=e[ft]();if(C(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function qi(e){return new F(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?A(function(o,i){return e(o,i,n)}):de,ge(1),r?He(t):Dn(function(){return new Zt}))}}function Vn(){for(var e=[],t=0;t=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new x}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,s=i===void 0?!0:i,a=e.resetOnRefCountZero,f=a===void 0?!0:a;return function(c){var u,p,m,d=0,h=!1,v=!1,Y=function(){p==null||p.unsubscribe(),p=void 0},B=function(){Y(),u=m=void 0,h=v=!1},N=function(){var O=u;B(),O==null||O.unsubscribe()};return y(function(O,Qe){d++,!v&&!h&&Y();var De=m=m!=null?m:r();Qe.add(function(){d--,d===0&&!v&&!h&&(p=$r(N,f))}),De.subscribe(Qe),!u&&d>0&&(u=new rt({next:function($e){return 
De.next($e)},error:function($e){v=!0,Y(),p=$r(B,o,$e),De.error($e)},complete:function(){h=!0,Y(),p=$r(B,s),De.complete()}}),U(O).subscribe(u))})(c)}}function $r(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function z(e,t=document){let r=ce(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function ce(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),V(e===_e()),J())}function Xe(e){return{x:e.offsetLeft,y:e.offsetTop}}function Kn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,Oe),l(()=>Xe(e)),V(Xe(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,Oe),l(()=>rr(e)),V(rr(e)))}var Yn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var 
o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!Wr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),va?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!Wr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=ba.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Gn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Jn=typeof WeakMap!="undefined"?new WeakMap:new Yn,Xn=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=ga.getInstance(),n=new La(t,r,this);Jn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){Xn.prototype[e]=function(){var t;return(t=Jn.get(this))[e].apply(t,arguments)}});var Aa=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:Xn}(),Zn=Aa;var eo=new x,Ca=$(()=>k(new Zn(e=>{for(let t of e)eo.next(t)}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),X(1));function he(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ye(e){return 
Ca.pipe(S(t=>t.observe(e)),g(t=>eo.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(()=>he(e)))),V(he(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var to=new x,Ra=$(()=>k(new IntersectionObserver(e=>{for(let t of e)to.next(t)},{threshold:0}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),X(1));function sr(e){return Ra.pipe(S(t=>t.observe(e)),g(t=>to.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function ro(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=he(e),o=bt(e);return r>=o.height-n.height-t}),J())}var cr={drawer:z("[data-md-toggle=drawer]"),search:z("[data-md-toggle=search]")};function no(e){return cr[e].checked}function Ke(e,t){cr[e].checked!==t&&cr[e].click()}function Ue(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),V(t.checked))}function ka(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ha(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(V(!1))}function oo(){let e=b(window,"keydown").pipe(A(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:no("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),A(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!ka(n,r)}return!0}),pe());return Ha().pipe(g(t=>t?M:e))}function le(){return new URL(location.href)}function ot(e){location.href=e.href}function io(){return new x}function ao(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)ao(e,r)}function _(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof 
t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)ao(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function so(){return location.hash.substring(1)}function Dr(e){let t=_("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Pa(e){return L(b(window,"hashchange"),e).pipe(l(so),V(so()),A(t=>t.length>0),X(1))}function co(e){return Pa(e).pipe(l(t=>ce(`[id="${t}"]`)),A(t=>typeof t!="undefined"))}function Vr(e){let t=matchMedia(e);return er(r=>t.addListener(()=>r(t.matches))).pipe(V(t.matches))}function fo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(V(e.matches))}function zr(e,t){return e.pipe(g(r=>r?t():M))}function ur(e,t={credentials:"same-origin"}){return ue(fetch(`${e}`,t)).pipe(fe(()=>M),g(r=>r.status!==200?Ot(()=>new Error(r.statusText)):k(r)))}function We(e,t){return ur(e,t).pipe(g(r=>r.json()),X(1))}function uo(e,t){let r=new DOMParser;return ur(e,t).pipe(g(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),X(1))}function pr(e){let t=_("script",{src:e});return $(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(g(()=>Ot(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),R(()=>document.head.removeChild(t)),ge(1))))}function po(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function lo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(po),V(po()))}function mo(){return{width:innerWidth,height:innerHeight}}function ho(){return b(window,"resize",{passive:!0}).pipe(l(mo),V(mo()))}function bo(){return G([lo(),ho()]).pipe(l(([e,t])=>({offset:e,size:t})),X(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(ee("size")),o=G([n,r]).pipe(l(()=>Xe(e)));return 
G([r,t,o]).pipe(l(([{height:i},{offset:s,size:a},{x:f,y:c}])=>({offset:{x:s.x-f,y:s.y-c+i},size:a})))}(()=>{function e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(s=>{let a=document.createElement("script");a.src=i,a.onload=s,document.body.appendChild(a)})),Promise.resolve())}var r=class extends EventTarget{constructor(n){super(),this.url=n,this.m=i=>{i.source===this.w&&(this.dispatchEvent(new MessageEvent("message",{data:i.data})),this.onmessage&&this.onmessage(i))},this.e=(i,s,a,f,c)=>{if(s===`${this.url}`){let u=new ErrorEvent("error",{message:i,filename:s,lineno:a,colno:f,error:c});this.dispatchEvent(u),this.onerror&&this.onerror(u)}};let o=document.createElement("iframe");o.hidden=!0,document.body.appendChild(this.iframe=o),this.w.document.open(),this.w.document.write(` + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

CLI

+ +

Launch comes with a CLI for listing bundles / endpoints, editing endpoints, +and sending tasks to endpoints.

+

The CLI can be used as scale-launch ....

+

Help

+

Run scale-launch --help for more options.

+
scale-launch --help
    This is the command line interface (CLI) package for Scale Launch.
+
+       ██╗      █████╗ ██╗   ██╗███╗   ██╗ ██████╗██╗  ██╗
+       ██║     ██╔══██╗██║   ██║████╗  ██║██╔════╝██║  ██║
+       ██║     ███████║██║   ██║██╔██╗ ██║██║     ███████║
+       ██║     ██╔══██║██║   ██║██║╚██╗██║██║     ██╔══██║
+       ███████╗██║  ██║╚██████╔╝██║ ╚████║╚██████╗██║  ██║
+       ╚══════╝╚═╝  ╚═╝ ╚═════╝ ╚═╝  ╚═══╝ ╚═════╝╚═╝  ╚═╝
+
+Usage: scale-launch [OPTIONS] COMMAND [ARGS]...
+
+Options:
+  --help  Show this message and exit.
+
+Commands:
+  batch-jobs  Batch Jobs is a wrapper around batch jobs in Scale Launch
+  bundles     Bundles is a wrapper around model bundles in Scale Launch
+  config      Config is a wrapper around getting and setting your API key and other configuration options
+  endpoints   Endpoints is a wrapper around model endpoints in Scale Launch
+  tasks       Tasks is a wrapper around sending requests to endpoints
+
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/concepts/batch_jobs/index.html b/concepts/batch_jobs/index.html new file mode 100644 index 00000000..ff76fd28 --- /dev/null +++ b/concepts/batch_jobs/index.html @@ -0,0 +1,691 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Batch Jobs - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Batch Jobs

+

For predicting over a larger set of tasks (> 50) at once, it is recommended to +use batch jobs. Batch jobs are a way to send a large number of tasks to a model +bundle. The tasks are processed in parallel, and the results are returned as a +list of predictions.

+

Batch jobs are created using the +batch_async_request +method of the +LaunchClient.

+
Creating and Following a Batch Job
import logging
+import os
+import time
+from launch import LaunchClient
+
+logger = logging.getLogger(__name__)
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+batch_job = client.batch_async_request(
+    model_bundle="test-bundle",
+    inputs=[
+        {"x": 2, "y": "hello"},
+        {"x": 3, "y": "world"},
+    ],
+    gpus=0,
+    labels={
+        "team": "MY_TEAM",
+        "product": "MY_PRODUCT",
+    }
+)
+
+status = "PENDING"
+res = None
+while status != "SUCCESS" and status != "FAILURE" and status != "CANCELLED":
+    time.sleep(30)
+    res = client.get_batch_async_response(batch_job["job_id"])
+    status = res["status"]
+    logging.info(f"the batch job is {status}")
+
+logging.info(res)
+
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/concepts/callbacks/index.html b/concepts/callbacks/index.html new file mode 100644 index 00000000..9f5a284d --- /dev/null +++ b/concepts/callbacks/index.html @@ -0,0 +1,852 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Callbacks - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Callbacks

+

Async model endpoints can be configured to send callbacks to a user-defined +callback URL. Callbacks are sent as HTTP POST requests with a JSON body. The +following code snippet shows how to create an async model endpoint with a +callback URL.

+

To configure an async endpoint to send callbacks, set the post_inference_hooks +field to include +launch.PostInferenceHooks.CALLBACK. +A callback URL also needs to be specified, and it can be configured as a default +using the default_callback_url argument to +launch.LaunchClient.create_model_endpoint +or as a per-task override using the callback_url field of +launch.EndpointRequest.

+
+

Note

+

Callbacks will not be sent if the endpoint does not have any post-inference +hooks specified, even if a default_callback_url is provided to the endpoint +creation method or if the prediction request has a callback_url override.

+
+
Creating an Async Model Endpoint with a Callback URL
import os
+import time
+from launch import EndpointRequest, LaunchClient, PostInferenceHooks
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoint = client.create_model_endpoint(
+    endpoint_name="demo-endpoint-callback",
+    model_bundle="test-bundle",
+    cpus=1,
+    min_workers=1,
+    endpoint_type="async",
+    update_if_exists=True,
+    labels={
+        "team": "MY_TEAM",
+        "product": "MY_PRODUCT",
+    },
+    post_inference_hooks=[PostInferenceHooks.CALLBACK],
+    default_callback_url="https://example.com",
+)
+
+while endpoint.status() != "READY":
+    time.sleep(10)
+
+future_default = endpoint.predict(
+    request=EndpointRequest(args={"x": 2, "y": "hello"})
+)
+"""
+A callback is sent to https://example.com with the following JSON body:
+{
+    "task_id": "THE_TASK_ID",
+    "result": 7
+}
+"""
+
+future_custom_callback_url = endpoint.predict(
+    request=EndpointRequest(
+        args={"x": 3, "y": "hello"}, callback_url="https://example.com/custom"
+    ),
+)
+
+"""
+A callback is sent to https://example.com/custom with the following JSON body:
+{
+    "task_id": "THE_TASK_ID",
+    "result": 8
+}
+"""
+
+

Authentication for callbacks

+
+

Warning

+

This feature is currently in beta, and the API is likely to change.

+
+

Callbacks can be authenticated using shared authentication headers. To enable authentication, +set either default_callback_auth_kind when creating the endpoint or callback_auth_kind +when making a prediction request.

+

Currently, the supported authentication methods are basic and mtls. If basic is used, +then the default_callback_auth_username and default_callback_auth_password fields must be +specified when creating the endpoint, or the callback_auth_username and callback_auth_password +fields must be specified when making a prediction request. If mtls is used, then the +same is true for the default_callback_auth_cert and default_callback_auth_key fields, +or the callback_auth_cert and callback_auth_key fields.

+
Creating an Async Model Endpoint with custom Callback auth
import os
+import time
+from launch import EndpointRequest, LaunchClient, PostInferenceHooks
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoint = client.create_model_endpoint(
+    endpoint_name="demo-endpoint-callback",
+    model_bundle="test-bundle",
+    cpus=1,
+    min_workers=1,
+    endpoint_type="async",
+    update_if_exists=True,
+    labels={
+        "team": "MY_TEAM",
+        "product": "MY_PRODUCT",
+    },
+    post_inference_hooks=[PostInferenceHooks.CALLBACK],
+    default_callback_url="https://example.com",
+    default_callback_auth_kind="basic",
+    default_callback_auth_username="user",
+    default_callback_auth_password="password",
+)
+
+while endpoint.status() != "READY":
+    time.sleep(10)
+
+future_default = endpoint.predict(
+    request=EndpointRequest(args={"x": 2, "y": "hello"})
+)
+"""
+A callback is sent to https://example.com with ("user", "password") as the basic auth.
+"""
+
+future_custom_callback_auth = endpoint.predict(
+    request=EndpointRequest(
+        args={"x": 3, "y": "hello"},
+        callback_auth_kind="mtls", 
+        callback_auth_cert="cert", 
+        callback_auth_key="key",
+    ),
+)
+"""
+A callback is sent with mTLS authentication.
+"""
+
+client.edit_model_endpoint(
+    model_endpoint=endpoint.model_endpoint,
+    default_callback_auth_kind="mtls",
+    default_callback_auth_cert="cert",
+    default_callback_auth_key="key",
+)
+
+while endpoint.status() != "READY":
+    time.sleep(10)
+
+future_default = endpoint.predict(
+    request=EndpointRequest(args={"x": 2, "y": "hello"})
+)
+"""
+A callback is sent with mTLS auth.
+"""
+
+future_custom_callback_auth = endpoint.predict(
+    request=EndpointRequest(
+        args={"x": 3, "y": "hello"},
+        callback_auth_kind="basic",
+        callback_auth_username="user",
+        callback_auth_password="pass",
+    ),
+)
+"""
+A callback is sent with ("user", "pass") as the basic auth.
+"""
+
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/concepts/endpoint_predictions/index.html b/concepts/endpoint_predictions/index.html new file mode 100644 index 00000000..419f8664 --- /dev/null +++ b/concepts/endpoint_predictions/index.html @@ -0,0 +1,1431 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Endpoint Predictions - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Endpoint Predictions

+

Once endpoints have been created, users can send tasks to them to make +predictions. The following code snippet shows how to send tasks to endpoints.

+
+
+
+
import os
+from launch import EndpointRequest, LaunchClient
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoint = client.get_model_endpoint("demo-endpoint-async")
+future = endpoint.predict(request=EndpointRequest(args={"x": 2, "y": "hello"}))
+response = future.get()
+print(response)
+
+
+
+
import os
+from launch import EndpointRequest, LaunchClient
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoint = client.get_model_endpoint("demo-endpoint-sync")
+response = endpoint.predict(request=EndpointRequest(args={"x": 2, "y": "hello"}))
+print(response)
+
+
+
+
import os
+from launch import EndpointRequest, LaunchClient
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoint = client.get_model_endpoint("demo-endpoint-streaming")
+response = endpoint.predict(request=EndpointRequest(args={"x": 2, "y": "hello"}))
+for chunk in response:
+    print(chunk)
+
+
+
+
+ + +
+ + + + +

+ EndpointRequest + + +

+
EndpointRequest(url: Optional[str] = None, args: Optional[Dict] = None, callback_url: Optional[str] = None, callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, callback_auth_username: Optional[str] = None, callback_auth_password: Optional[str] = None, callback_auth_cert: Optional[str] = None, callback_auth_key: Optional[str] = None, return_pickled: Optional[bool] = False, request_id: Optional[str] = None)
+
+ +
+ + +

Represents a single request to either a SyncEndpoint, StreamingEndpoint, or AsyncEndpoint.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
url + Optional[str] + +
+

A url to some file that can be read in to a ModelBundle's predict function. Can be an image, raw text, etc. +Note: the contents of the file located at url are opened as a sequence of bytes and passed +to the predict function. If you instead want to pass the url itself as an input to the predict function, +see args.

+

Exactly one of url and args must be specified.

+
+
+ None +
args + Optional[Dict] + +
+

A Dictionary with arguments to a ModelBundle's predict function. If the predict function has signature +predict_fn(foo, bar), then the keys in the dictionary should be "foo" and "bar". +Values must be native Python objects.

+

Exactly one of url and args must be specified.

+
+
+ None +
return_pickled + Optional[bool] + +
+

Whether the output should be a pickled python object, or directly returned serialized json.

+
+
+ False +
callback_url + Optional[str] + +
+

The callback url to use for this task. If None, then the +default_callback_url of the endpoint is used. The endpoint must specify +"callback" as a post-inference hook for the callback to be triggered.

+
+
+ None +
callback_auth_kind + Optional[Literal['basic', 'mtls']] + +
+

The default callback auth kind to use for async endpoints. +Either "basic" or "mtls". This can be overridden in the task parameters for each +individual task.

+
+
+ None +
callback_auth_username + Optional[str] + +
+

The default callback auth username to use. This only +applies if callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
callback_auth_password + Optional[str] + +
+

The default callback auth password to use. This only +applies if callback_auth_kind is "basic". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
callback_auth_cert + Optional[str] + +
+

The default callback auth cert to use. This only applies +if callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
callback_auth_key + Optional[str] + +
+

The default callback auth key to use. This only applies +if callback_auth_kind is "mtls". This can be overridden in the task +parameters for each individual task.

+
+
+ None +
request_id + Optional[str] + +
+

(deprecated) A user-specifiable id for requests. +Should be unique among EndpointRequests made in the same batch call. +If one isn't provided the client will generate its own.

+
+
+ None +
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + + +

+ EndpointResponseFuture + + +

+
EndpointResponseFuture(client, endpoint_name: str, async_task_id: str)
+
+ +
+ + +

Represents a future response from an Endpoint. Specifically, when the EndpointResponseFuture is ready, +then its get method will return an actual instance of EndpointResponse.

+

This object should not be directly instantiated by the user.

+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
client + +
+

An instance of LaunchClient.

+
+
+ required +
endpoint_name + str + +
+

The name of the endpoint.

+
+
+ required +
async_task_id + str + +
+

An async task id.

+
+
+ required +
+ + + + +
+ + + + + + + + + + +
+ + + + +

+ get + + +

+
get(timeout: Optional[float] = None) -> EndpointResponse
+
+ +
+ +

Retrieves the EndpointResponse for the prediction request after it completes. This method blocks.

+ + + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
timeout + Optional[float] + +
+

The maximum number of seconds to wait for the response. If None, then +the method will block indefinitely until the response is ready.

+
+
+ None +
+ +
+ +
+ + + +
+ +
+ +
+ +
+ + + + +

+ EndpointResponse + + +

+
EndpointResponse(client, status: str, result_url: Optional[str] = None, result: Optional[str] = None, traceback: Optional[str] = None)
+
+ +
+ + +

Represents a response received from a Endpoint.

+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
client + +
+

An instance of LaunchClient.

+
+
+ required +
status + str + +
+

A string representing the status of the request, i.e. SUCCESS, FAILURE, or PENDING

+
+
+ required +
result_url + Optional[str] + +
+

A string that is a url containing the pickled python object from the +Endpoint's predict function.

+

Exactly one of result_url or result will be populated, +depending on the value of return_pickled in the request.

+
+
+ None +
result + Optional[str] + +
+

A string that is the serialized return value (in json form) of the Endpoint's predict function. +Specifically, one can json.loads() the value of result to get the original python object back.

+

Exactly one of result_url or result will be populated, +depending on the value of return_pickled in the request.

+
+
+ None +
traceback + Optional[str] + +
+

The stack trace if the inference endpoint raised an error. Can be used for debugging

+
+
+ None +
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + + +

+ EndpointResponseStream + + +

+
EndpointResponseStream(response)
+
+ +
+

+ Bases: Iterator

+ + +

Represents a stream response from an Endpoint. This object is iterable and yields +EndpointResponse objects.

+

This object should not be directly instantiated by the user.

+ + + + +
+ + + + + + + + + + +
+ + + + +

+ __iter__ + + +

+
__iter__()
+
+ +
+ +

Uses server-sent events to iterate through the stream.

+ +
+ +
+ + +
+ + + + +

+ __next__ + + +

+
__next__()
+
+ +
+ +

Uses server-sent events to iterate through the stream.

+ +
+ +
+ + + +
+ +
+ +
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/concepts/model_bundles/index.html b/concepts/model_bundles/index.html new file mode 100644 index 00000000..c2771fa5 --- /dev/null +++ b/concepts/model_bundles/index.html @@ -0,0 +1,1073 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Model Bundles - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Model Bundles

+

Model Bundles are deployable models that can be used to make predictions. They +are created by packaging a model up into a deployable format.

+

Creating Model Bundles

+

There are five methods for creating model bundles: +create_model_bundle_from_callable_v2, +create_model_bundle_from_dirs_v2, +create_model_bundle_from_runnable_image_v2, +create_model_bundle_from_triton_enhanced_runnable_image_v2, +and create_model_bundle_from_streaming_enhanced_runnable_image_v2.

+

The first directly pickles a user-specified load_predict_fn, a function which +loads the model and returns a predict_fn, a function which takes in a request. +The second takes in directories containing a load_predict_fn and the +module path to the load_predict_fn. +The third takes a Docker image and a command that starts a process listening for +requests at port 5005 using HTTP and exposes POST /predict and +GET /readyz endpoints. +The fourth is a variant of the third that also starts an instance of the NVidia +Triton framework for efficient model serving. +The fifth is a variant of the third that responds with a stream of SSEs at POST /stream (the user +can decide whether POST /predict is also exposed).

+

Each of these modes of creating a model bundle is called a "Flavor".

+
+

Info

+

Choosing the right model bundle flavor

+

Here are some tips for how to choose between the different flavors of ModelBundle:

+

A CloudpickleArtifactFlavor (creating from callable) is good if:

+
    +
  • You are creating the model bundle from a Jupyter notebook.
  • +
  • The model bundle is small without too many dependencies.
  • +
+

A ZipArtifactFlavor (creating from directories) is good if:

+
    +
  • You have a relatively constant set of dependencies.
  • +
  • You have a lot of custom code that you want to include in the model bundle.
  • +
  • You do not want to build a web server and Docker image to serve your model.
  • +
+

A RunnableImageFlavor (creating from runnable image) is good if:

+
    +
  • You have a lot of dependencies.
  • +
  • You have a lot of custom code that you want to include in the model bundle.
  • +
  • You are comfortable with building a web server and Docker image to serve your model.
  • +
+

A TritonEnhancedRunnableImageFlavor (a runnable image variant) is good if:

+
    +
  • You want to use a RunnableImageFlavor
  • +
  • You also want to use NVidia's tritonserver to accelerate model inference
  • +
+

A StreamingEnhancedRunnableImageFlavor (a runnable image variant) is good if:

+
    +
  • You want to use a RunnableImageFlavor
  • +
  • You also want to support token streaming while the model is generating
  • +
+
+
+
+
+
import os
+from pydantic import BaseModel
+from launch import LaunchClient
+
+
+class MyRequestSchema(BaseModel):
+    x: int
+    y: str
+
+class MyResponseSchema(BaseModel):
+    __root__: int
+
+
+def my_load_predict_fn(model):
+    def returns_model_of_x_plus_len_of_y(x: int, y: str) -> int:
+        """MyRequestSchema -> MyResponseSchema"""
+        assert isinstance(x, int) and isinstance(y, str)
+        return model(x) + len(y)
+
+    return returns_model_of_x_plus_len_of_y
+
+
+def my_load_model_fn():
+    def my_model(x):
+        return x * 2
+
+    return my_model
+
+BUNDLE_PARAMS = {
+    "model_bundle_name": "test-bundle",
+    "load_model_fn": my_load_model_fn,
+    "load_predict_fn": my_load_predict_fn,
+    "request_schema": MyRequestSchema,
+    "response_schema": MyResponseSchema,
+    "requirements": ["pytest==7.2.1", "numpy"],  # list your requirements here
+    "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime",
+}
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS)
+
+
+
+
import os
+import tempfile
+from pydantic import BaseModel
+from launch import LaunchClient
+
+directory = tempfile.mkdtemp()
+model_filename = os.path.join(directory, "model.py")
+with open(model_filename, "w") as f:
+    f.write("""def my_load_model_fn(deserialized_config):
+    def my_model(x):
+        return x * 2
+
+    return my_model
+""")
+
+predict_filename = os.path.join(directory, "predict.py")
+with open(predict_filename, "w") as f:
+    f.write("""def my_load_predict_fn(deserialized_config, model):
+    def returns_model_of_x_plus_len_of_y(x: int, y: str) -> int:
+        assert isinstance(x, int) and isinstance(y, str)
+        return model(x) + len(y)
+
+    return returns_model_of_x_plus_len_of_y
+""")
+
+requirements_filename = os.path.join(directory, "requirements.txt")
+with open(requirements_filename, "w") as f:
+    f.write("""
+pytest==7.2.1
+numpy
+""")
+
+"""
+The directory structure should now look like
+
+directory/
+    model.py
+    predict.py
+    requirements.txt
+"""
+
+
+class MyRequestSchema(BaseModel):
+    x: int
+    y: str
+
+class MyResponseSchema(BaseModel):
+    __root__: int
+
+print(directory)
+print(model_filename)
+print(predict_filename)
+print(requirements_filename)
+
+BUNDLE_PARAMS = {
+    "model_bundle_name": "test-bundle-from-dirs",
+    "base_paths": [directory],
+    "load_predict_fn_module_path": f"{os.path.basename(directory)}.predict.my_load_predict_fn",
+    "load_model_fn_module_path": f"{os.path.basename(directory)}.model.my_load_model_fn",
+    "request_schema": MyRequestSchema,
+    "response_schema": MyResponseSchema,
+    "requirements_path": requirements_filename,
+    "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime",
+}
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+client.create_model_bundle_from_dirs_v2(**BUNDLE_PARAMS)
+
+# Clean up files from demo
+os.remove(model_filename)
+os.remove(predict_filename)
+os.remove(requirements_filename)
+os.rmdir(directory)
+
+
+
+
import os
+from pydantic import BaseModel
+from launch import LaunchClient
+
+
+class MyRequestSchema(BaseModel):
+    x: int
+    y: str
+
+class MyResponseSchema(BaseModel):
+    __root__: int
+
+
+BUNDLE_PARAMS = {
+    "model_bundle_name": "test-bundle",
+    "request_schema": MyRequestSchema,
+    "response_schema": MyResponseSchema,
+    "repository": "...",
+    "tag": "...",
+    "command": ...,
+    "predict_route": "/predict",
+    "healthcheck_route": "/readyz",
+    "env": {
+        "TEST_KEY": "test_value",
+    },
+    "readiness_initial_delay_seconds": 30,
+}
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+client.create_model_bundle_from_runnable_image_v2(**BUNDLE_PARAMS)
+
+
+
+
import os
+from pydantic import BaseModel
+from launch import LaunchClient
+
+
+class MyRequestSchema(BaseModel):
+    x: int
+    y: str
+
+class MyResponseSchema(BaseModel):
+    __root__: int
+
+
+BUNDLE_PARAMS = {
+    "model_bundle_name": "test-triton-bundle",
+    "request_schema": MyRequestSchema,
+    "response_schema": MyResponseSchema,
+    "repository": "...",
+    "tag": "...",
+    "command": ...,
+    "predict_route": "/predict",
+    "healthcheck_route": "/readyz",
+    "env": {
+        "TEST_KEY": "test_value",
+    },
+    "readiness_initial_delay_seconds": 30,
+    "triton_model_repository": "...",
+    "triton_model_replicas": {"": ""},
+    "triton_num_cpu": 4.0,
+    "triton_commit_tag": "",
+    "triton_storage": "",
+    "triton_memory": "",
+    "triton_readiness_initial_delay_seconds": 300,
+}
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+client.create_model_bundle_from_triton_enhanced_runnable_image_v2(**BUNDLE_PARAMS)
+
+
+
+
import os
+from pydantic import BaseModel
+from launch import LaunchClient
+
+
+class MyRequestSchema(BaseModel):
+    x: int
+    y: str
+
+class MyResponseSchema(BaseModel):
+    __root__: int
+
+
+BUNDLE_PARAMS = {
+    "model_bundle_name": "test-streaming-bundle",
+    "request_schema": MyRequestSchema,
+    "response_schema": MyResponseSchema,
+    "repository": "...",
+    "tag": "...",
+    "command": ...,  # optional; if provided, will also expose the /predict endpoint
+    "predict_route": "/predict",
+    "healthcheck_route": "/readyz",
+    "streaming_command": ...,  # required
+    "streaming_predict_route": "/stream",
+    "env": {
+        "TEST_KEY": "test_value",
+    },
+    "readiness_initial_delay_seconds": 30,
+}
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+client.create_model_bundle_from_streaming_enhanced_runnable_image_v2(**BUNDLE_PARAMS)
+
+
+
+
+

Configuring Model Bundles

+

The app_config field of a model bundle is a dictionary that can be used to +configure the model bundle. If specified, the app_config is passed to the +load_predict_fn when the model bundle is deployed, alongside the model. This +can allow for more code reuse between multiple bundles that perform similar +tasks.

+
Creating Model Bundles with app_config
import os
+from launch import LaunchClient
+from pydantic import BaseModel
+from typing import List, Union
+from typing_extensions import Literal
+
+
+class MyRequestSchemaSingle(BaseModel):
+    kind: Literal['single']
+    x: int
+    y: str
+
+class MyRequestSchemaBatched(BaseModel):
+    kind: Literal['batched']
+    x: List[int]
+    y: List[str]
+
+class MyRequestSchema(BaseModel):
+    __root__: Union[MyRequestSchemaSingle, MyRequestSchemaBatched]
+
+class MyResponseSchema(BaseModel):
+    __root__: Union[int, List[int]]
+
+
+def my_load_predict_fn(app_config, model):
+    def returns_model_of_x_plus_len_of_y(x: Union[int, List[int]], y: Union[str, List[str]]) -> Union[int, List[int]]:
+        """MyRequestSchema -> MyResponseSchema"""
+        if app_config["mode"] == "single":
+            assert isinstance(x, int) and isinstance(y, str)
+            return model(x) + len(y)
+
+        result = []
+        for x_i, y_i in zip(x, y):
+            result.append(model(x_i) + len(y_i))
+        return result
+
+    return returns_model_of_x_plus_len_of_y
+
+
+def my_load_model_fn(app_config):
+    def my_model_single(x: int):
+        return x * 2
+
+    def my_model_batched(x: List[int]):
+        return [my_model_single(x_i) for x_i in x]
+
+    if app_config["mode"] == "single":
+        return my_model_single
+
+    return my_model_batched
+
+
+BUNDLE_PARAMS_SINGLE = {
+    "model_bundle_name": "test-bundle-single",
+    "load_predict_fn": my_load_predict_fn,
+    "load_model_fn": my_load_model_fn,
+    "requirements": ["pytest==7.2.1", "numpy"],
+    "request_schema": MyRequestSchema,
+    "response_schema": MyResponseSchema,
+    "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime",
+    "app_config": {"mode": "single"},
+}
+BUNDLE_PARAMS_BATCHED = {
+    "model_bundle_name": "test-bundle-batched",
+    "load_predict_fn": my_load_predict_fn,
+    "load_model_fn": my_load_model_fn,
+    "requirements": ["pytest==7.2.1", "numpy"],
+    "request_schema": MyRequestSchema,
+    "response_schema": MyResponseSchema,
+    "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime",
+    "app_config": {"mode": "batched"},
+}
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+bundle_single = client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS_SINGLE)
+bundle_batch = client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS_BATCHED)
+
+

Updating Model Bundles

+

Model Bundles are immutable, meaning they cannot be edited once created. +However, it is possible to clone an existing model bundle with a new app_config +using +clone_model_bundle_with_changes_v2.

+

Listing Model Bundles

+

To list all the model bundles you own, use +list_model_bundles_v2.

+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/concepts/model_endpoints/index.html b/concepts/model_endpoints/index.html new file mode 100644 index 00000000..fd29989f --- /dev/null +++ b/concepts/model_endpoints/index.html @@ -0,0 +1,795 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Model Endpoints - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Model Endpoints

+

Model Endpoints are deployments of models that can receive requests and return +predictions containing the results of the model's inference. Each model endpoint +is associated with a model bundle, which contains the model's code. An endpoint +specifies deployment parameters, such as the minimum and maximum number of +workers, as well as the requested resources for each worker, such as the number +of CPUs, amount of memory, GPU count, and type of GPU.

+

Endpoints can be asynchronous, synchronous, or streaming. Asynchronous endpoints return +a future immediately after receiving a request, and the future can be used to +retrieve the prediction once it is ready. Synchronous endpoints return the +prediction directly after receiving a request. Streaming endpoints are variants of synchronous +endpoints that return a stream of SSEs instead of a single HTTP response.

+
+

Info

+

Choosing the right inference mode

+

Here are some tips for how to choose between SyncEndpoint, StreamingEndpoint, AsyncEndpoint, and BatchJob for deploying your ModelBundle:

+

A SyncEndpoint is good if:

+
    +
  • You have strict latency requirements (e.g. on the order of seconds or less).
  • +
  • You are willing to have resources continually allocated.
  • +
+

A StreamingEndpoint is good if:

+
    +
  • You have stricter requirements on perceived latency than SyncEndpoint can support (e.g. you want tokens generated by the model to start being returned almost immediately rather than waiting for the model generation to finish).
  • +
  • You are willing to have resources continually allocated.
  • +
+

An AsyncEndpoint is good if:

+
    +
  • You want to save on compute costs.
  • +
  • Your inference code takes a long time to run.
  • +
  • Your latency requirements are on the order of minutes.
  • +
+

A BatchJob is good if:

+
    +
  • You know there is a large batch of inputs ahead of time.
  • +
  • You want to optimize for throughput instead of latency.
  • +
+
+

Creating Async Model Endpoints

+

Async model endpoints are the most cost-efficient way to perform inference on +tasks that are less latency-sensitive.

+
Creating an Async Model Endpoint
import os
+from launch import LaunchClient
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoint = client.create_model_endpoint(
+    endpoint_name="demo-endpoint-async",
+    model_bundle="test-bundle",
+    cpus=1,
+    min_workers=0,
+    endpoint_type="async",
+    update_if_exists=True,
+    labels={
+        "team": "MY_TEAM",
+        "product": "MY_PRODUCT",
+    },
+)
+
+

Creating Sync Model Endpoints

+

Sync model endpoints are useful for latency-sensitive tasks, such as real-time +inference. Sync endpoints are more expensive than async endpoints.

+
+

Note

+

Sync model endpoints require at least 1 min_worker.

+
+
Creating a Sync Model Endpoint
import os
+from launch import LaunchClient
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoint = client.create_model_endpoint(
+    endpoint_name="demo-endpoint-sync",
+    model_bundle="test-bundle",
+    cpus=1,
+    min_workers=1,
+    endpoint_type="sync",
+    update_if_exists=True,
+    labels={
+        "team": "MY_TEAM",
+        "product": "MY_PRODUCT",
+    },
+)
+
+

Creating Streaming Model Endpoints

+

Streaming model endpoints are variants of sync model endpoints that are useful for tasks with strict +requirements on perceived latency. Streaming endpoints are more expensive than async endpoints.

+
+

Note

+

Streaming model endpoints require at least 1 min_worker.

+
+
Creating a Streaming Model Endpoint
import os
+from launch import LaunchClient
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoint = client.create_model_endpoint(
+    endpoint_name="demo-endpoint-streaming",
+    model_bundle="test-streaming-bundle",
+    cpus=1,
+    min_workers=1,
+    per_worker=1,
+    endpoint_type="streaming",
+    update_if_exists=True,
+    labels={
+        "team": "MY_TEAM",
+        "product": "MY_PRODUCT",
+    },
+)
+
+

Managing Model Endpoints

+

Model endpoints can be listed, updated, and deleted using the Launch API.

+
Listing Model Endpoints
import os
+from launch import LaunchClient
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoints = client.list_model_endpoints()
+
+
Updating a Model Endpoint
import os
+from launch import LaunchClient
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+client.edit_model_endpoint(
+    model_endpoint="demo-endpoint-sync",
+    max_workers=2,
+)
+
+
Deleting a Model Endpoint
import time
+import os
+from launch import LaunchClient
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+endpoint = client.create_model_endpoint(
+    endpoint_name="demo-endpoint-tmp",
+    model_bundle="test-bundle",
+    cpus=1,
+    min_workers=0,
+    endpoint_type="async",
+    update_if_exists=True,
+    labels={
+        "team": "MY_TEAM",
+        "product": "MY_PRODUCT",
+    },
+)
+time.sleep(15)  # Wait for Launch to build the endpoint
+client.delete_model_endpoint(model_endpoint_name="demo-endpoint-tmp")
+
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/concepts/overview/index.html b/concepts/overview/index.html new file mode 100644 index 00000000..98a4ac5b --- /dev/null +++ b/concepts/overview/index.html @@ -0,0 +1,669 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Overview - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Overview

+

Creating deployments on Launch generally involves three steps:

+
    +
  1. +

    Create and upload a ModelBundle. Pass your trained model + as well as pre-/post-processing code to the Scale Launch Python client, and + we’ll create a model bundle based on the code and store it in our Bundle Store.

    +
  2. +
  3. +

    Create a ModelEndpoint. Pass a ModelBundle as well as + infrastructure settings such as the desired number of GPUs to our client. + This provisions resources on Scale’s cluster dedicated to your ModelEndpoint.

    +
  4. +
  5. +

    Make requests to the ModelEndpoint. You can make requests through the Python + client, or make HTTP requests directly to Scale.

    +
  6. +
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/docs/.buildinfo b/docs/.buildinfo deleted file mode 100644 index 616492f4..00000000 --- a/docs/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 62d9bd11a532420ce68eed803eb621b2 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/api/client.md b/docs/api/client.md deleted file mode 100644 index 3b25b240..00000000 --- a/docs/api/client.md +++ /dev/null @@ -1,3 +0,0 @@ -# Launch Client - -::: launch.client.LaunchClient diff --git a/docs/api/endpoint_predictions.md b/docs/api/endpoint_predictions.md deleted file mode 100644 index 8e31703b..00000000 --- a/docs/api/endpoint_predictions.md +++ /dev/null @@ -1,6 +0,0 @@ -# Endpoint Predictions - -::: launch.model_endpoint.EndpointRequest -::: launch.model_endpoint.EndpointResponse -::: launch.model_endpoint.EndpointResponseFuture -::: launch.model_endpoint.EndpointResponseStream diff --git a/docs/api/hooks.md b/docs/api/hooks.md deleted file mode 100644 index 7f12fcda..00000000 --- a/docs/api/hooks.md +++ /dev/null @@ -1,2 +0,0 @@ - -::: launch.hooks.PostInferenceHooks diff --git a/docs/api/llms.md b/docs/api/llms.md deleted file mode 100644 index 9429868c..00000000 --- a/docs/api/llms.md +++ /dev/null @@ -1,42 +0,0 @@ -# LLM APIs - -We provide some APIs to conveniently create, list and inference with LLMs. Under the hood they are Launch model endpoints. 
- -## Example - -```py title="LLM APIs Usage" -import os - -from rich import print - -from launch import LaunchClient -from launch.api_client.model.llm_inference_framework import ( - LLMInferenceFramework, -) -from launch.api_client.model.llm_source import LLMSource - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"), endpoint=os.getenv("LAUNCH_ENDPOINT")) - -endpoints = client.list_llm_model_endpoints() - -print(endpoints) - -endpoint_name = "test-flan-t5-xxl" -client.create_llm_model_endpoint( - endpoint_name=endpoint_name, - model_name="flan-t5-xxl", - source=LLMSource.HUGGING_FACE, - inference_framework=LLMInferenceFramework.DEEPSPEED, - inference_framework_image_tag=os.getenv("INFERENCE_FRAMEWORK_IMAGE_TAG"), - num_shards=4, - min_workers=1, - max_workers=1, - gpus=4, - endpoint_type="sync", -) - -# Wait for the endpoint to be ready - -output = client.completions_sync(endpoint_name, prompt="What is Deep Learning?", max_new_tokens=10, temperature=0) -print(output) -``` diff --git a/docs/api/model_bundles.md b/docs/api/model_bundles.md deleted file mode 100644 index 4b864c41..00000000 --- a/docs/api/model_bundles.md +++ /dev/null @@ -1,21 +0,0 @@ -# Model Bundles - -::: launch.model_bundle.CloudpickleArtifactFlavor - -::: launch.model_bundle.CreateModelBundleV2Response - -::: launch.model_bundle.CustomFramework - -::: launch.model_bundle.ListModelBundlesV2Response - -::: launch.model_bundle.ModelBundle - -::: launch.model_bundle.ModelBundleV2Response - -::: launch.model_bundle.PytorchFramework - -::: launch.model_bundle.RunnableImageFlavor - -::: launch.model_bundle.TensorflowFramework - -::: launch.model_bundle.ZipArtifactFlavor diff --git a/docs/api/model_endpoints.md b/docs/api/model_endpoints.md deleted file mode 100644 index 88730f28..00000000 --- a/docs/api/model_endpoints.md +++ /dev/null @@ -1,9 +0,0 @@ -# Model Endpoints - -All classes here are returned by the -[`get_model_endpoint`](/api/client/#launch.client.LaunchClient.get_model_endpoint) 
-method and provide a `predict` function. - -::: launch.model_endpoint.AsyncEndpoint -::: launch.model_endpoint.SyncEndpoint -::: launch.model_endpoint.StreamingEndpoint diff --git a/docs/cli.md b/docs/cli.md deleted file mode 100644 index 9de776d9..00000000 --- a/docs/cli.md +++ /dev/null @@ -1,12 +0,0 @@ -*Launch* comes with a CLI for listing bundles / endpoints, editing endpoints, -and sending tasks to endpoints. - -The CLI can be used as `scale-launch ...`. - -## Help - -Run `scale-launch --help` for more options. - -```{title="scale-launch --help"} -{! docs/cli_help.txt !} -``` diff --git a/docs/cli_help.txt b/docs/cli_help.txt deleted file mode 100644 index b68edb05..00000000 --- a/docs/cli_help.txt +++ /dev/null @@ -1,21 +0,0 @@ - - This is the command line interface (CLI) package for Scale Launch. - - ██╗ █████╗ ██╗ ██╗███╗ ██╗ ██████╗██╗ ██╗ - ██║ ██╔══██╗██║ ██║████╗ ██║██╔════╝██║ ██║ - ██║ ███████║██║ ██║██╔██╗ ██║██║ ███████║ - ██║ ██╔══██║██║ ██║██║╚██╗██║██║ ██╔══██║ - ███████╗██║ ██║╚██████╔╝██║ ╚████║╚██████╗██║ ██║ - ╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚═╝ ╚═╝ - -Usage: scale-launch [OPTIONS] COMMAND [ARGS]... - -Options: - --help Show this message and exit. - -Commands: - batch-jobs Batch Jobs is a wrapper around batch jobs in Scale Launch - bundles Bundles is a wrapper around model bundles in Scale Launch - config Config is a wrapper around getting and setting your API key and other configuration options - endpoints Endpoints is a wrapper around model endpoints in Scale Launch - tasks Tasks is a wrapper around sending requests to endpoints diff --git a/docs/concepts/batch_jobs.md b/docs/concepts/batch_jobs.md deleted file mode 100644 index ac80c825..00000000 --- a/docs/concepts/batch_jobs.md +++ /dev/null @@ -1,44 +0,0 @@ -# Batch Jobs - -For predicting over a larger set of tasks (> 50) at once, it is recommended to -use batch jobs. Batch jobs are a way to send a large number of tasks to a model -bundle. 
The tasks are processed in parallel, and the results are returned as a -list of predictions. - -Batch jobs are created using the -[`batch_async_request`](/api/client/#launch.client.LaunchClient.batch_async_request) -method of the -[`LaunchClient`](/api/client/#launch.client.LaunchClient). - -```py title="Creating and Following a Batch Job" -import logging -import os -import time -from launch import LaunchClient - -logger = logging.getLogger(__name__) - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -batch_job = client.batch_async_request( - model_bundle="test-bundle", - inputs=[ - {"x": 2, "y": "hello"}, - {"x": 3, "y": "world"}, - ], - gpus=0, - labels={ - "team": "MY_TEAM", - "product": "MY_PRODUCT", - } -) - -status = "PENDING" -res = None -while status != "SUCCESS" and status != "FAILURE" and status != "CANCELLED": - time.sleep(30) - res = client.get_batch_async_response(batch_job["job_id"]) - status = res["status"] - logging.info(f"the batch job is {status}") - -logging.info(res) -``` diff --git a/docs/concepts/callbacks.md b/docs/concepts/callbacks.md deleted file mode 100644 index abae84e9..00000000 --- a/docs/concepts/callbacks.md +++ /dev/null @@ -1,163 +0,0 @@ -# Callbacks - -Async model endpoints can be configured to send callbacks to a user-defined -callback URL. Callbacks are sent as HTTP POST requests with a JSON body. The -following code snippet shows how to create an async model endpoint with a -callback URL. - -To configure an async endpoint to send callbacks, set the `post_inference_hooks` -field to include -[`launch.PostInferenceHooks.CALLBACK`](/api/hooks/#launch.hooks.PostInferenceHooks). 
-A callback URL also needs to be specified, and it can be configured as a default -using the `default_callback_url` argument to -[`launch.LaunchClient.create_model_endpoint`](/api/client/#launch.LaunchClient.create_model_endpoint) -or as a per-task override using the `callback_url` field of -[`launch.EndpointRequest`](/api/model_endpoints/#launch.model_predictions.EndpointRequest). - -!!! Note - Callbacks will not be sent if the endpoint does not have any post-inference - hooks specified, even if a `default_callback_url` is provided to the endpoint - creation method or if the prediction request has a `callback_url` override. - - -```py title="Creating an Async Model Endpoint with a Callback URL" hl_lines="17-18 37" -import os -import time -from launch import EndpointRequest, LaunchClient, PostInferenceHooks - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -endpoint = client.create_model_endpoint( - endpoint_name="demo-endpoint-callback", - model_bundle="test-bundle", - cpus=1, - min_workers=1, - endpoint_type="async", - update_if_exists=True, - labels={ - "team": "MY_TEAM", - "product": "MY_PRODUCT", - }, - post_inference_hooks=[PostInferenceHooks.CALLBACK], - default_callback_url="https://example.com", -) - -while endpoint.status() != "READY": - time.sleep(10) - -future_default = endpoint.predict( - request=EndpointRequest(args={"x": 2, "y": "hello"}) -) -""" -A callback is sent to https://example.com with the following JSON body: -{ - "task_id": "THE_TASK_ID", - "result": 7 -} -""" - -future_custom_callback_url = endpoint.predict( - request=EndpointRequest( - args={"x": 3, "y": "hello"}, callback_url="https://example.com/custom" - ), -) - -""" -A callback is sent to https://example.com/custom with the following JSON body: -{ - "task_id": "THE_TASK_ID", - "result": 8 -} -""" -``` - -## Authentication for callbacks - -!!! Warning - This feature is currently in beta, and the API is likely to change. 
- -Callbacks can be authenticated using shared authentication headers. To enable authentication, -set either `default_callback_auth_kind` when creating the endpoint or `callback_auth_kind` -when making a prediction request. - -Currently, the supported authentication methods are `basic` and `mtls`. If `basic` is used, -then the `default_callback_auth_username` and `default_callback_auth_password` fields must be -specified when creating the endpoint, or the `callback_auth_username` and `callback_auth_password` -fields must be specified when making a prediction request. If `mtls` is used, then the -same is true for the `default_callback_auth_cert` and `default_callback_auth_key` fields, -or the `callback_auth_cert` and `callback_auth_key` fields. - -```py title="Creating an Async Model Endpoint with custom Callback auth" hl_lines="18-21 37-39 48-50 66-68" -import os -import time -from launch import EndpointRequest, LaunchClient, PostInferenceHooks - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -endpoint = client.create_model_endpoint( - endpoint_name="demo-endpoint-callback", - model_bundle="test-bundle", - cpus=1, - min_workers=1, - endpoint_type="async", - update_if_exists=True, - labels={ - "team": "MY_TEAM", - "product": "MY_PRODUCT", - }, - post_inference_hooks=[PostInferenceHooks.CALLBACK], - default_callback_url="https://example.com", - default_callback_auth_kind="basic", - default_callback_auth_username="user", - default_callback_auth_password="password", -) - -while endpoint.status() != "READY": - time.sleep(10) - -future_default = endpoint.predict( - request=EndpointRequest(args={"x": 2, "y": "hello"}) -) -""" -A callback is sent to https://example.com with ("user", "password") as the basic auth. 
-""" - -future_custom_callback_auth = endpoint.predict( - request=EndpointRequest( - args={"x": 3, "y": "hello"}, - callback_auth_kind="mtls", - callback_auth_cert="cert", - callback_auth_key="key", - ), -) -""" -A callback is sent with mTLS authentication. -""" - -client.edit_model_endpoint( - model_endpoint=endpoint.model_endpoint, - default_callback_auth_kind="mtls", - default_callback_auth_cert="cert", - default_callback_auth_key="key", -) - -while endpoint.status() != "READY": - time.sleep(10) - -future_default = endpoint.predict( - request=EndpointRequest(args={"x": 2, "y": "hello"}) -) -""" -A callback is sent with mTLS auth. -""" - -future_custom_callback_auth = endpoint.predict( - request=EndpointRequest( - args={"x": 3, "y": "hello"}, - callback_auth_kind="basic", - callback_auth_username="user", - callback_auth_password="pass", - ), -) -""" -A callback is sent with ("user", "pass") as the basic auth. -""" -``` diff --git a/docs/concepts/endpoint_predictions.md b/docs/concepts/endpoint_predictions.md deleted file mode 100644 index 34d8af2f..00000000 --- a/docs/concepts/endpoint_predictions.md +++ /dev/null @@ -1,44 +0,0 @@ -# Endpoint Predictions - -Once endpoints have been created, users can send tasks to them to make -predictions. The following code snippet shows how to send tasks to endpoints. 
- -=== "Sending a Task to an Async Endpoint" - ```py - import os - from launch import EndpointRequest, LaunchClient - - client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - endpoint = client.get_model_endpoint("demo-endpoint-async") - future = endpoint.predict(request=EndpointRequest(args={"x": 2, "y": "hello"})) - response = future.get() - print(response) - ``` - -=== "Sending a Task to a Sync Endpoint" - ```py - import os - from launch import EndpointRequest, LaunchClient - - client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - endpoint = client.get_model_endpoint("demo-endpoint-sync") - response = endpoint.predict(request=EndpointRequest(args={"x": 2, "y": "hello"})) - print(response) - ``` - -=== "Sending a Task to a Streaming Endpoint" - ```py - import os - from launch import EndpointRequest, LaunchClient - - client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - endpoint = client.get_model_endpoint("demo-endpoint-streaming") - response = endpoint.predict(request=EndpointRequest(args={"x": 2, "y": "hello"})) - for chunk in response: - print(chunk) - ``` - -::: launch.model_endpoint.EndpointRequest -::: launch.model_endpoint.EndpointResponseFuture -::: launch.model_endpoint.EndpointResponse -::: launch.model_endpoint.EndpointResponseStream diff --git a/docs/concepts/model_bundles.md b/docs/concepts/model_bundles.md deleted file mode 100644 index 6928abab..00000000 --- a/docs/concepts/model_bundles.md +++ /dev/null @@ -1,394 +0,0 @@ -# Model Bundles - -Model Bundles are deployable models that can be used to make predictions. They -are created by packaging a model up into a deployable format. 
- -## Creating Model Bundles - -There are five methods for creating model bundles: -[`create_model_bundle_from_callable_v2`](/api/client/#launch.client.LaunchClient.create_model_bundle_from_callable_v2), -[`create_model_bundle_from_dirs_v2`](/api/client/#launch.client.LaunchClient.create_model_bundle_from_dirs_v2), -[`create_model_bundle_from_runnable_image_v2`](/api/client/#launch.client.LaunchClient.create_model_bundle_from_runnable_image_v2), -[`create_model_bundle_from_triton_enhanced_runnable_image_v2`](/api/client/#launch.client.LaunchClient.create_model_bundle_from_triton_enhanced_runnable_image_v2), -and [`create_model_bundle_from_streaming_enhanced_runnable_image_v2`](/api/client/#launch.client.LaunchClient.create_model_bundle_from_streaming_enhanced_runnable_image_v2). - -The first directly pickles a user-specified `load_predict_fn`, a function which -loads the model and returns a `predict_fn`, a function which takes in a request. -The second takes in directories containing a `load_predict_fn` and the -module path to the `load_predict_fn`. -The third takes a Docker image and a command that starts a process listening for -requests at port 5005 using HTTP and exposes `POST /predict` and -`GET /readyz` endpoints. -The fourth is a variant of the third that also starts an instance of the NVidia -Triton framework for efficient model serving. -The fifth is a variant of the third that responds with a stream of SSEs at `POST /stream` (the user -can decide whether `POST /predict` is also exposed). - -Each of these modes of creating a model bundle is called a "Flavor". - -!!! info - # Choosing the right model bundle flavor - - Here are some tips for how to choose between the different flavors of ModelBundle: - - A `CloudpickleArtifactFlavor` (creating from callable) is good if: - - * You are creating the model bundle from a Jupyter notebook. - * The model bundle is small without too many dependencies. 
- - A `ZipArtifactFlavor` (creating from directories) is good if: - - * You have a relatively constant set of dependencies. - * You have a lot of custom code that you want to include in the model bundle. - * You do not want to build a web server and Docker image to serve your model. - - A `RunnableImageFlavor` (creating from runnable image) is good if: - - * You have a lot of dependencies. - * You have a lot of custom code that you want to include in the model bundle. - * You are comfortable with building a web server and Docker image to serve your model. - - - A `TritonEnhancedRunnableImageFlavor` (a runnable image variant) is good if: - - * You want to use a `RunnableImageFlavor` - * You also want to use [NVidia's `tritonserver`](https://developer.nvidia.com/nvidia-triton-inference-server) to accelerate model inference - - A `StreamingEnhancedRunnableImageFlavor` (a runnable image variant) is good if: - - * You want to use a `RunnableImageFlavor` - * You also want to support token streaming while the model is generating - -=== "Creating From Callables" - ```py - import os - from pydantic import BaseModel, RootModel - from launch import LaunchClient - - - class MyRequestSchema(BaseModel): - x: int - y: str - - class MyResponseSchema(RootModel): - root: int - - - def my_load_predict_fn(model): - def returns_model_of_x_plus_len_of_y(x: int, y: str) -> int: - """MyRequestSchema -> MyResponseSchema""" - assert isinstance(x, int) and isinstance(y, str) - return model(x) + len(y) - - return returns_model_of_x_plus_len_of_y - - - def my_load_model_fn(): - def my_model(x): - return x * 2 - - return my_model - - BUNDLE_PARAMS = { - "model_bundle_name": "test-bundle", - "load_model_fn": my_load_model_fn, - "load_predict_fn": my_load_predict_fn, - "request_schema": MyRequestSchema, - "response_schema": MyResponseSchema, - "requirements": ["pytest==7.2.1", "numpy"], # list your requirements here - "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime", - } - - client = 
LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS) - ``` - -=== "Creating From Directories" - ```py - import os - import tempfile - from pydantic import BaseModel, RootModel - from launch import LaunchClient - - directory = tempfile.mkdtemp() - model_filename = os.path.join(directory, "model.py") - with open(model_filename, "w") as f: - f.write("""def my_load_model_fn(deserialized_config): - def my_model(x): - return x * 2 - - return my_model - """) - - predict_filename = os.path.join(directory, "predict.py") - with open(predict_filename, "w") as f: - f.write("""def my_load_predict_fn(deserialized_config, model): - def returns_model_of_x_plus_len_of_y(x: int, y: str) -> int: - assert isinstance(x, int) and isinstance(y, str) - return model(x) + len(y) - - return returns_model_of_x_plus_len_of_y - """) - - requirements_filename = os.path.join(directory, "requirements.txt") - with open(requirements_filename, "w") as f: - f.write(""" - pytest==7.2.1 - numpy - """) - - """ - The directory structure should now look like - - directory/ - model.py - predict.py - requirements.txt - """ - - - class MyRequestSchema(BaseModel): - x: int - y: str - - class MyResponseSchema(RootModel): - root: int - - print(directory) - print(model_filename) - print(predict_filename) - print(requirements_filename) - - BUNDLE_PARAMS = { - "model_bundle_name": "test-bundle-from-dirs", - "base_paths": [directory], - "load_predict_fn_module_path": f"{os.path.basename(directory)}.predict.my_load_predict_fn", - "load_model_fn_module_path": f"{os.path.basename(directory)}.model.my_load_model_fn", - "request_schema": MyRequestSchema, - "response_schema": MyResponseSchema, - "requirements_path": requirements_filename, - "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime", - } - - client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - client.create_model_bundle_from_dirs_v2(**BUNDLE_PARAMS) - - # Clean up files from demo - 
os.remove(model_filename) - os.remove(predict_filename) - os.remove(requirements_filename) - os.rmdir(directory) - ``` - -=== "Creating From a Runnable Image" - ```py - import os - from pydantic import BaseModel, RootModel - from launch import LaunchClient - - - class MyRequestSchema(BaseModel): - x: int - y: str - - class MyResponseSchema(RootModel): - root: int - - - BUNDLE_PARAMS = { - "model_bundle_name": "test-bundle", - "request_schema": MyRequestSchema, - "response_schema": MyResponseSchema, - "repository": "...", - "tag": "...", - "command": ..., - "predict_route": "/predict", - "healthcheck_route": "/readyz", - "env": { - "TEST_KEY": "test_value", - }, - "readiness_initial_delay_seconds": 30, - } - - client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - client.create_model_bundle_from_runnable_image_v2(**BUNDLE_PARAMS) - ``` - - -=== "Creating From a Triton Enhanced Runnable Image" - ```py - import os - from pydantic import BaseModel, RootModel - from launch import LaunchClient - - - class MyRequestSchema(BaseModel): - x: int - y: str - - class MyResponseSchema(RootModel): - root: int - - - BUNDLE_PARAMS = { - "model_bundle_name": "test-triton-bundle", - "request_schema": MyRequestSchema, - "response_schema": MyResponseSchema, - "repository": "...", - "tag": "...", - "command": ..., - "predict_route": "/predict", - "healthcheck_route": "/readyz", - "env": { - "TEST_KEY": "test_value", - }, - "readiness_initial_delay_seconds": 30, - "triton_model_repository": "...", - "triton_model_replicas": {"": ""}, - "triton_num_cpu": 4.0, - "triton_commit_tag": "", - "triton_storage": "", - "triton_memory": "", - "triton_readiness_initial_delay_seconds": 300, - } - - client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - client.create_model_bundle_from_triton_enhanced_runnable_image_v2(**BUNDLE_PARAMS) - ``` - - -=== "Creating From a Streaming Enhanced Runnable Image" - ```py - import os - from pydantic import BaseModel, RootModel - from launch import 
LaunchClient - - - class MyRequestSchema(BaseModel): - x: int - y: str - - class MyResponseSchema(RootModel): - root: int - - - BUNDLE_PARAMS = { - "model_bundle_name": "test-streaming-bundle", - "request_schema": MyRequestSchema, - "response_schema": MyResponseSchema, - "repository": "...", - "tag": "...", - "command": ..., # optional; if provided, will also expose the /predict endpoint - "predict_route": "/predict", - "healthcheck_route": "/readyz", - "streaming_command": ..., # required - "streaming_predict_route": "/stream", - "env": { - "TEST_KEY": "test_value", - }, - "readiness_initial_delay_seconds": 30, - } - - client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - client.create_model_bundle_from_streaming_enhanced_runnable_image_v2(**BUNDLE_PARAMS) - ``` - - -## Configuring Model Bundles - -The `app_config` field of a model bundle is a dictionary that can be used to -configure the model bundle. If specified, the `app_config` is passed to the -`load_predict_fn` when the model bundle is deployed, alongside the `model`. This -can allow for more code reuse between multiple bundles that perform similar -tasks. 
- -```py title="Creating Model Bundles with app_config" -import os -from launch import LaunchClient -from pydantic import BaseModel, RootModel -from typing import List, Union -from typing_extensions import Literal - - -class MyRequestSchemaSingle(BaseModel): - kind: Literal['single'] - x: int - y: str - -class MyRequestSchemaBatched(BaseModel): - kind: Literal['batched'] - x: List[int] - y: List[str] - -class MyRequestSchema(RootModel): - root: Union[MyRequestSchemaSingle, MyRequestSchemaBatched] - -class MyResponseSchema(RootModel): - root: Union[int, List[int]] - - -def my_load_predict_fn(app_config, model): - def returns_model_of_x_plus_len_of_y(x: Union[int, List[int]], y: Union[str, List[str]]) -> Union[int, List[int]]: - """MyRequestSchema -> MyResponseSchema""" - if app_config["mode"] == "single": - assert isinstance(x, int) and isinstance(y, str) - return model(x) + len(y) - - result = [] - for x_i, y_i in zip(x, y): - result.append(model(x_i) + len(y_i)) - return result - - return returns_model_of_x_plus_len_of_y - - -def my_load_model_fn(app_config): - def my_model_single(x: int): - return x * 2 - - def my_model_batched(x: List[int]): - return [my_model_single(x_i) for x_i in x] - - if app_config["mode"] == "single": - return my_model_single - - return my_model_batched - - -BUNDLE_PARAMS_SINGLE = { - "model_bundle_name": "test-bundle-single", - "load_predict_fn": my_load_predict_fn, - "load_model_fn": my_load_model_fn, - "requirements": ["pytest==7.2.1", "numpy"], - "request_schema": MyRequestSchema, - "response_schema": MyResponseSchema, - "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime", - "app_config": {"mode": "single"}, -} -BUNDLE_PARAMS_BATCHED = { - "model_bundle_name": "test-bundle-batched", - "load_predict_fn": my_load_predict_fn, - "load_model_fn": my_load_model_fn, - "requirements": ["pytest==7.2.1", "numpy"], - "request_schema": MyRequestSchema, - "response_schema": MyResponseSchema, - "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime", 
- "app_config": {"mode": "batched"}, -} - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -bundle_single = client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS_SINGLE) -bundle_batch = client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS_BATCHED) -``` - -## Updating Model Bundles - -Model Bundles are immutable, meaning they cannot be edited once created. -However, it is possible to clone an existing model bundle with a new `app_config` -using -[`clone_model_bundle_with_changes_v2`](/api/client/#launch.client.LaunchClient.clone_model_bundle_with_changes_v2). - -## Listing Model Bundles - -To list all the model bundles you own, use -[`list_model_bundles_v2`](/api/client/#launch.client.LaunchClient.list_model_bundles_v2). diff --git a/docs/concepts/model_endpoints.md b/docs/concepts/model_endpoints.md deleted file mode 100644 index 06abd624..00000000 --- a/docs/concepts/model_endpoints.md +++ /dev/null @@ -1,162 +0,0 @@ -# Model Endpoints - -Model Endpoints are deployments of models that can receive requests and return -predictions containing the results of the model's inference. Each model endpoint -is associated with a model bundle, which contains the model's code. An endpoint -specifies deployment parameters, such as the minimum and maximum number of -workers, as well as the requested resources for each worker, such as the number -of CPUs, amount of memory, GPU count, and type of GPU. - -Endpoints can be asynchronous, synchronous, or streaming. Asynchronous endpoints return -a future immediately after receiving a request, and the future can be used to -retrieve the prediction once it is ready. Synchronous endpoints return the -prediction directly after receiving a request. Streaming endpoints are variants of synchronous -endpoints that return a stream of SSEs instead of a single HTTP response. - -!!! 
info - # Choosing the right inference mode - - Here are some tips for how to choose between SyncEndpoint, StreamingEndpoint, AsyncEndpoint, and BatchJob for deploying your ModelBundle: - - A SyncEndpoint is good if: - - * You have strict latency requirements (e.g. on the order of seconds or less). - * You are willing to have resources continually allocated. - - A StreamingEndpoint is good if: - - * You have stricter requirements on perceived latency than SyncEndpoint can support (e.g. you want tokens generated by the model to start being returned almost immediately rather than waiting for the model generation to finish). - * You are willing to have resources continually allocated. - - An AsyncEndpoint is good if: - - * You want to save on compute costs. - * Your inference code takes a long time to run. - * Your latency requirements are on the order of minutes. - - A BatchJob is good if: - - * You know there is a large batch of inputs ahead of time. - * You want to optimize for throughput instead of latency. - -## Creating Async Model Endpoints - -Async model endpoints are the most cost-efficient way to perform inference on -tasks that are less latency-sensitive. - -```py title="Creating an Async Model Endpoint" -import os -from launch import LaunchClient - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -endpoint = client.create_model_endpoint( - endpoint_name="demo-endpoint-async", - model_bundle="test-bundle", - cpus=1, - min_workers=0, - endpoint_type="async", - update_if_exists=True, - labels={ - "team": "MY_TEAM", - "product": "MY_PRODUCT", - }, -) -``` - -## Creating Sync Model Endpoints - -Sync model endpoints are useful for latency-sensitive tasks, such as real-time -inference. Sync endpoints are more expensive than async endpoints. -!!! Note - Sync model endpoints require at least 1 `min_worker`. 
- -```py title="Creating a Sync Model Endpoint" -import os -from launch import LaunchClient - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -endpoint = client.create_model_endpoint( - endpoint_name="demo-endpoint-sync", - model_bundle="test-bundle", - cpus=1, - min_workers=1, - endpoint_type="sync", - update_if_exists=True, - labels={ - "team": "MY_TEAM", - "product": "MY_PRODUCT", - }, -) -``` - -## Creating Streaming Model Endpoints - -Streaming model endpoints are variants of sync model endpoints that are useful for tasks with strict -requirements on perceived latency. Streaming endpoints are more expensive than async endpoints. -!!! Note - Streaming model endpoints require at least 1 `min_worker`. - -```py title="Creating a Streaming Model Endpoint" -import os -from launch import LaunchClient - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -endpoint = client.create_model_endpoint( - endpoint_name="demo-endpoint-streaming", - model_bundle="test-streaming-bundle", - cpus=1, - min_workers=1, - per_worker=1, - endpoint_type="streaming", - update_if_exists=True, - labels={ - "team": "MY_TEAM", - "product": "MY_PRODUCT", - }, -) -``` - -## Managing Model Endpoints - -Model endpoints can be listed, updated, and deleted using the Launch API. 
- -```py title="Listing Model Endpoints" -import os -from launch import LaunchClient - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -endpoints = client.list_model_endpoints() -``` - -```py title="Updating a Model Endpoint" -import os -from launch import LaunchClient - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -client.edit_model_endpoint( - model_endpoint="demo-endpoint-sync", - max_workers=2, -) -``` - -```py title="Deleting a Model Endpoint" -import time -import os -from launch import LaunchClient - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) -endpoint = client.create_model_endpoint( - endpoint_name="demo-endpoint-tmp", - model_bundle="test-bundle", - cpus=1, - min_workers=0, - endpoint_type="async", - update_if_exists=True, - labels={ - "team": "MY_TEAM", - "product": "MY_PRODUCT", - }, -) -time.sleep(15) # Wait for Launch to build the endpoint -client.delete_model_endpoint(model_endpoint_name="demo-endpoint-tmp") -``` diff --git a/docs/concepts/overview.md b/docs/concepts/overview.md deleted file mode 100644 index d52f5b6d..00000000 --- a/docs/concepts/overview.md +++ /dev/null @@ -1,14 +0,0 @@ -# Overview - -Creating deployments on Launch generally involves three steps: - -1. Create and upload a [`ModelBundle`](../model_bundles). Pass your trained model - as well as pre-/post-processing code to the Scale Launch Python client, and - we’ll create a model bundle based on the code and store it in our Bundle Store. - -2. Create a [`ModelEndpoint`](../model_endpoints). Pass a ModelBundle as well as - infrastructure settings such as the desired number of GPUs to our client. - This provisions resources on Scale’s cluster dedicated to your ModelEndpoint. - -3. Make requests to the ModelEndpoint. You can make requests through the Python - client, or make HTTP requests directly to Scale. 
diff --git a/docs/guides/custom_docker_images.md b/docs/guides/custom_docker_images.md deleted file mode 100644 index 12946a72..00000000 --- a/docs/guides/custom_docker_images.md +++ /dev/null @@ -1,116 +0,0 @@ -# Custom docker images - -!!! Warning - This feature is currently in beta, and the API is likely to change. Please contact us if you are interested - in using this feature. - -If you need more customization than what cloudpickle or zip artifacts can offer, or if you just already have a pre-built -docker image, then you can create a Model Bundle with that docker image. You will need to modify your image to run a -web server that exposes HTTP port 5005. - -In our example below, we assume that you have some existing Python function `my_inference_fn` that can be imported. -If you need to invoke some other binary (e.g. a custom C++ binary), then you can shell out to the OS to call that binary; -subsequent versions of this document will have native examples for non-Python binaries. - -For choice of web server, we recommend [FastAPI](https://fastapi.tiangolo.com/) due to its speed and ergonomics. -Any web server would work, although we give examples with FastAPI. - -## Step 1: Install Requirements - -You can add `fastapi` and `uvicorn` to the `requirements.txt` file that gets installed as part of your Dockerfile. Alternatively, -you can add `pip install fastapi uvicorn` to the Dockerfile directly. - -## Step 2: Set up a web server application - -Inside your project workspace, create a `server.py` file with these contents: - -```py -# test='skip' -from fastapi import FastAPI - -from pydantic import BaseModel - -app = FastAPI() - -class MyRequestSchema(BaseModel): - url: str - - -class MyResponseSchema(BaseModel): - response: str - -def my_inference_fn(req: MyRequestSchema) -> MyResponseSchema: - # This is an example inference function - you can instead import a function from your own codebase, - # or shell out to the OS, etc. 
- resp = req.url + "_hello" - return MyResponseSchema(response=resp) - -@app.post("/predict") -async def predict(request: MyRequestSchema) -> MyResponseSchema: - response = my_inference_fn(request) - return response - -@app.get("/readyz") -def readyz(): - return "ok" -``` - -## Step 3: Rebuild and push your image - -Build your updated Dockerfile and push the image to a location that is accessible by Scale. For instance, if you are -using AWS ECR, please make sure that the necessary cross-account permissions allow Scale to pull your docker image. - -## Step 4: Deploy! - -Now you can upload your docker image as a Model Bundle, and then create a Model Endpoint referencing that Model Bundle. Note that `path.to.your.server.file:app` in the `command` section below should be relative to the `WORKDIR` of your docker image. - - -```py -# test='skip' -import os - -from launch import LaunchClient - -from server import MyRequestSchema, MyResponseSchema # Defined as part of your server.py - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - -model_bundle_name = "my_bundle_name" - -client.create_model_bundle_from_runnable_image_v2( - model_bundle_name=model_bundle_name, - request_schema=MyRequestSchema, - response_schema=MyResponseSchema, - repository="$YOUR_ECR_REPO", - tag="$YOUR_IMAGE_TAG", - command=[ - "dumb-init", - "--", - "uvicorn", - "path.to.your.server.file:app", - "--port", - "5005", - "--host", - "::", - ], - predict_route="/predict", - healthcheck_route="/readyz", - readiness_initial_delay_seconds=120, - env={}, -) - -client.create_model_endpoint( - endpoint_name=f"endpoint-{model_bundle_name}", - model_bundle=model_bundle_name, - endpoint_type="async", - min_workers=0, - max_workers=1, - per_worker=1, - memory="30Gi", - storage="40Gi", - cpus=4, # This must be at least 2 because forwarding services consume 1 cpu. 
- gpus=1, - gpu_type="nvidia-ampere-a10", - update_if_exists=True, -) -``` diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index 8479a954..00000000 --- a/docs/index.md +++ /dev/null @@ -1,123 +0,0 @@ -# Scale Launch - -[![CI](https://circleci.com/gh/scaleapi/launch-python-client.svg)](https://circleci.com/gh/scaleapi/launch-python-client) -[![pypi](https://img.shields.io/pypi/v/scale-launch.svg)](https://pypi.python.org/pypi/scale-launch) - -Simple, scalable, and high performance ML service deployment in python. - -## Example - -```py title="Launch Usage" -import os -import time -from launch import LaunchClient -from launch import EndpointRequest -from pydantic import BaseModel, RootModel -from rich import print - - -class MyRequestSchema(BaseModel): - x: int - y: str - -class MyResponseSchema(RootModel): - root: int - - -def my_load_predict_fn(model): - def returns_model_of_x_plus_len_of_y(x: int, y: str) -> int: - """MyRequestSchema -> MyResponseSchema""" - assert isinstance(x, int) and isinstance(y, str) - return model(x) + len(y) - - return returns_model_of_x_plus_len_of_y - - -def my_load_model_fn(): - def my_model(x): - return x * 2 - - return my_model - -BUNDLE_PARAMS = { - "model_bundle_name": "test-bundle", - "load_predict_fn": my_load_predict_fn, - "load_model_fn": my_load_model_fn, - "request_schema": MyRequestSchema, - "response_schema": MyResponseSchema, - "requirements": ["pytest==7.2.1", "numpy"], # list your requirements here - "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime", -} - -ENDPOINT_PARAMS = { - "endpoint_name": "demo-endpoint", - "model_bundle": "test-bundle", - "cpus": 1, - "min_workers": 0, - "endpoint_type": "async", - "update_if_exists": True, - "labels": { - "team": "MY_TEAM", - "product": "launch", - } -} - -def predict_on_endpoint(request: MyRequestSchema) -> MyResponseSchema: - # Wait for the endpoint to be ready first before submitting a task - endpoint = 
client.get_model_endpoint(endpoint_name="demo-endpoint") - while endpoint.status() != "READY": - time.sleep(10) - - endpoint_request = EndpointRequest(args=request.dict(), return_pickled=False) - - future = endpoint.predict(request=endpoint_request) - raw_response = future.get() - - response = MyResponseSchema.parse_raw(raw_response.result) - return response - - -client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY")) - -client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS) -endpoint = client.create_model_endpoint(**ENDPOINT_PARAMS) - -request = MyRequestSchema(x=5, y="hello") -response = predict_on_endpoint(request) -print(response) -""" -MyResponseSchema(root=10) -""" -``` - -What's going on here: - -* First we use [`pydantic`](https://github.com/pydantic/pydantic) to define our request and response - schemas, `MyRequestSchema` and `MyResponseSchema`. These schemas are used to generate the API - documentation for our models. -* Next we define the `model` and the `load_predict_fn`, which tells Launch - how to load our model and how to make predictions with it. In this case, - we're just returning a function that adds the length of the string `y` to - `model(x)`, where `model` doubles the integer `x`. -* We then define the model bundle by specifying the `load_predict_fn`, the `request_schema`, and the - `response_schema`. We also specify the `env_params`, which tell Launch environment settings like - the base image to use. In this case, we're using a PyTorch image. -* Next, we create the model endpoint, which is the API that we'll use to make predictions. We - specify the `model_bundle` that we created above, and we specify the `endpoint_type`, which tells - Launch whether to use a synchronous or asynchronous endpoint. In this case, we're using an - asynchronous endpoint, which means that we can make predictions and return immediately with a - `future` object. We can then use the `future` object to get the prediction result later. 
-* Finally, we make a prediction by calling `predict_on_endpoint` with a `MyRequestSchema` object. - This function first waits for the endpoint to be ready, then it submits a prediction request to - the endpoint. It then waits for the prediction result and returns it. - -Notice that we specified `min_workers=0`, meaning that the endpoint will scale down to 0 workers -when it's not being used. - -## Installation - -To use Scale Launch, first install it using `pip`: - -```commandline title="Installation" -pip install -U scale-launch -``` diff --git a/docs/models/Annotation.md b/docs/models/Annotation.md deleted file mode 100644 index 38d0a336..00000000 --- a/docs/models/Annotation.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.annotation.Annotation - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the URL citation. Always `url_citation`. | must be one of ["url_citation", ] -**url_citation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A URL citation when using web search. 
| -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Audio.md b/docs/models/Audio.md deleted file mode 100644 index f44481a8..00000000 --- a/docs/models/Audio.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.audio.Audio - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Unique identifier for a previous audio response from the model. 
| -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Audio1.md b/docs/models/Audio1.md deleted file mode 100644 index 6d81db0a..00000000 --- a/docs/models/Audio1.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.audio1.Audio1 - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**expires_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations. | -**transcript** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Transcript of the audio generated by the model. 
| -**data** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Base64 encoded audio bytes generated by the model, in the format specified in the request. | -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Unique identifier for this audio response. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Audio2.md b/docs/models/Audio2.md deleted file mode 100644 index 8abc7b01..00000000 --- a/docs/models/Audio2.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.audio2.Audio2 - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**voice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The voice the model uses to respond. Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`. | -**format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. | must be one of ["wav", "aac", "mp3", "flac", "opus", "pcm16", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/BatchCompletionsJob.md b/docs/models/BatchCompletionsJob.md deleted file mode 100644 index 4f064b93..00000000 --- a/docs/models/BatchCompletionsJob.md +++ /dev/null @@ -1,24 +0,0 @@ -# launch.api_client.model.batch_completions_job.BatchCompletionsJob - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- 
| ------------- | ------------- | ------------- | ------------- -**completed_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**expires_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model configuration for the batch inference. Hardware configurations are inferred. 
| -**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**output_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the output file. The output file will be a JSON file of type List[CompletionOutput]. | -**status** | [**BatchCompletionsJobStatus**](BatchCompletionsJobStatus.md) | [**BatchCompletionsJobStatus**](BatchCompletionsJobStatus.md) | | -**input_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. | [optional] -**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/BatchCompletionsJobStatus.md b/docs/models/BatchCompletionsJobStatus.md deleted file mode 100644 index ea4c16e8..00000000 --- a/docs/models/BatchCompletionsJobStatus.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.batch_completions_job_status.BatchCompletionsJobStatus - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/BatchCompletionsModelConfig.md b/docs/models/BatchCompletionsModelConfig.md deleted file mode 100644 index aae7b481..00000000 --- a/docs/models/BatchCompletionsModelConfig.md +++ /dev/null @@ -1,55 +0,0 @@ -# launch.api_client.model.batch_completions_model_config.BatchCompletionsModelConfig - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the model to use. | -**max_model_len** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model context length, If unspecified, will be automatically derived from the model config | [optional] -**max_num_seqs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of sequences per iteration | [optional] -**enforce_eager** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal perforamnce and flexibility | [optional] -**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging face hub. 
This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. | [optional] if omitted the server will use the default value of false -**pipeline_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of pipeline stages. Default to None. | [optional] -**tensor_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tensor parallel replicas. Default to None. | [optional] -**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights. | [optional] -**disable_log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable logging requests. Default to None. 
| [optional] -**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call parser | [optional] -**enable_auto_tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable auto tool choice | [optional] -**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. * \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. * \"pt\" will load the weights in the pytorch bin format. * \"safetensors\" will load the weights in the safetensors format. * \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading. * \"dummy\" will initialize the weights with random values, which is mainly for profiling. * \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information. 
* \"bitsandbytes\" will load the weights using bitsandbytes quantization. | [optional] -**config_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'. | [optional] -**tokenizer_mode** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokenizer mode. 'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`. | [optional] -**limit_mm_per_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of data instances per modality per prompt. Only applicable for multimodal models. | [optional] -**max_num_batched_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of batched tokens per iteration | [optional] -**tokenizer** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name or path of the huggingface tokenizer to use. 
| [optional] -**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models. | [optional] -**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Random seed for the model. | [optional] -**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**code_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**rope_scaling** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum. | [optional] -**tokenizer_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm. | [optional] -**max_seq_len_to_capture** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode. | [optional] -**disable_sliding_window** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to disable sliding window. 
If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored. | [optional] -**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, skip initialization of tokenizer and detokenizer. | [optional] -**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. If not specified, the model name will be the same as `model`. | [optional] -**override_neuron_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. | [optional] -**mm_processor_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. 
| [optional] -**block_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of a cache block in number of tokens. | [optional] -**gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Fraction of GPU memory to use for the vLLM execution. | [optional] -**swap_space** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of the CPU swap space per GPU (in GiB). | [optional] -**cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. | [optional] -**num_gpu_blocks_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. 
| [optional] -**enable_prefix_caching** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enables automatic prefix caching. | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the checkpoint to load the model from. | [optional] -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Suggested number of shards to distribute the model. When not specified, will infer the number of shards based on model config. System may decide to use a different number than the given value. | [optional] if omitted the server will use the default value of 1 -**max_context_length** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum context length to use for the model. Defaults to the max allowed by the model. Deprecated in favor of max_model_len. | [optional] -**response_role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Role of the response in the conversation. Only supported in chat completions. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/BatchJobSerializationFormat.md b/docs/models/BatchJobSerializationFormat.md deleted file mode 100644 index 9f25654c..00000000 --- a/docs/models/BatchJobSerializationFormat.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.batch_job_serialization_format.BatchJobSerializationFormat - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/BatchJobStatus.md b/docs/models/BatchJobStatus.md deleted file mode 100644 index aa250ada..00000000 --- a/docs/models/BatchJobStatus.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.batch_job_status.BatchJobStatus - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/BodyUploadFileV1FilesPost.md b/docs/models/BodyUploadFileV1FilesPost.md deleted file mode 100644 index f6bcac2b..00000000 --- a/docs/models/BodyUploadFileV1FilesPost.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.body_upload_file_v1_files_post.BodyUploadFileV1FilesPost - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**file** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CallbackAuth.md b/docs/models/CallbackAuth.md deleted file mode 100644 index 573a9437..00000000 --- a/docs/models/CallbackAuth.md +++ /dev/null @@ 
-1,9 +0,0 @@ -# launch.api_client.model.callback_auth.CallbackAuth - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CallbackBasicAuth.md b/docs/models/CallbackBasicAuth.md deleted file mode 100644 index 01b70697..00000000 --- a/docs/models/CallbackBasicAuth.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.callback_basic_auth.CallbackBasicAuth - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**password** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**kind** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["basic", ] -**username** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CallbackmTLSAuth.md b/docs/models/CallbackmTLSAuth.md deleted file mode 100644 index 35531dd1..00000000 --- a/docs/models/CallbackmTLSAuth.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.callbackm_tls_auth.CallbackmTLSAuth - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**kind** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["mtls", ] -**cert** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**key** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CancelBatchCompletionsV2Response.md b/docs/models/CancelBatchCompletionsV2Response.md deleted file mode 100644 index 2a15119e..00000000 --- a/docs/models/CancelBatchCompletionsV2Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.cancel_batch_completions_v2_response.CancelBatchCompletionsV2Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | 
Whether the cancellation was successful | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CancelFineTuneResponse.md b/docs/models/CancelFineTuneResponse.md deleted file mode 100644 index f992928a..00000000 --- a/docs/models/CancelFineTuneResponse.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.cancel_fine_tune_response.CancelFineTuneResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model 
list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionFunctionCallOption.md b/docs/models/ChatCompletionFunctionCallOption.md deleted file mode 100644 index cd4771cd..00000000 --- a/docs/models/ChatCompletionFunctionCallOption.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.chat_completion_function_call_option.ChatCompletionFunctionCallOption - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. 
| -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionFunctions.md b/docs/models/ChatCompletionFunctions.md deleted file mode 100644 index 6bcf2f67..00000000 --- a/docs/models/ChatCompletionFunctions.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.chat_completion_functions.ChatCompletionFunctions - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
| -**description** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A description of what the function does, used by the model to choose when and how to call the function. | [optional] -**parameters** | [**FunctionParameters**](FunctionParameters.md) | [**FunctionParameters**](FunctionParameters.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionMessageToolCall.md b/docs/models/ChatCompletionMessageToolCall.md deleted file mode 100644 index 11d26184..00000000 --- a/docs/models/ChatCompletionMessageToolCall.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.chat_completion_message_tool_call.ChatCompletionMessageToolCall - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**function** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, 
bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The function that the model called. | -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The ID of the tool call. | -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the tool. Currently, only `function` is supported. | must be one of ["function", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionMessageToolCallChunk.md b/docs/models/ChatCompletionMessageToolCallChunk.md deleted file mode 100644 index a9da0672..00000000 --- a/docs/models/ChatCompletionMessageToolCallChunk.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.chat_completion_message_tool_call_chunk.ChatCompletionMessageToolCallChunk - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The ID of the tool call. | [optional] -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the tool. Currently, only `function` is supported. 
| [optional] must be one of ["function", ] -**function** | [**Function2**](Function2.md) | [**Function2**](Function2.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionMessageToolCallsInput.md b/docs/models/ChatCompletionMessageToolCallsInput.md deleted file mode 100644 index 7baf96ff..00000000 --- a/docs/models/ChatCompletionMessageToolCallsInput.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.chat_completion_message_tool_calls_input.ChatCompletionMessageToolCallsInput - -The tool calls generated by the model, such as function calls. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tool calls generated by the model, such as function calls. 
| - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionMessageToolCallsOutput.md b/docs/models/ChatCompletionMessageToolCallsOutput.md deleted file mode 100644 index c2232b77..00000000 --- a/docs/models/ChatCompletionMessageToolCallsOutput.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.chat_completion_message_tool_calls_output.ChatCompletionMessageToolCallsOutput - -The tool calls generated by the model, such as function calls. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tool calls generated by the model, such as function calls. 
| - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionNamedToolChoice.md b/docs/models/ChatCompletionNamedToolChoice.md deleted file mode 100644 index dba91cc9..00000000 --- a/docs/models/ChatCompletionNamedToolChoice.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.chat_completion_named_tool_choice.ChatCompletionNamedToolChoice - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**function** | [**Function3**](Function3.md) | [**Function3**](Function3.md) | | -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the tool. Currently, only `function` is supported. 
| must be one of ["function", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestAssistantMessage.md b/docs/models/ChatCompletionRequestAssistantMessage.md deleted file mode 100644 index 9b40c74a..00000000 --- a/docs/models/ChatCompletionRequestAssistantMessage.md +++ /dev/null @@ -1,21 +0,0 @@ -# launch.api_client.model.chat_completion_request_assistant_message.ChatCompletionRequestAssistantMessage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `assistant`. 
| must be one of ["assistant", ] -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. | [optional] -**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The refusal message by the assistant. | [optional] -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An optional name for the participant. Provides the model information to differentiate between participants of the same role. | [optional] -**audio** | [**Audio**](Audio.md) | [**Audio**](Audio.md) | Data about a previous audio response from the model. [Learn more](/docs/guides/audio). | [optional] -**tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tool calls generated by the model, such as function calls. | [optional] -**function_call** | [**FunctionCall**](FunctionCall.md) | [**FunctionCall**](FunctionCall.md) | Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestAssistantMessageContentPart.md b/docs/models/ChatCompletionRequestAssistantMessageContentPart.md deleted file mode 100644 index 9981a8b3..00000000 --- a/docs/models/ChatCompletionRequestAssistantMessageContentPart.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.chat_completion_request_assistant_message_content_part.ChatCompletionRequestAssistantMessageContentPart - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestDeveloperMessage.md b/docs/models/ChatCompletionRequestDeveloperMessage.md deleted file mode 100644 index 3622a78d..00000000 --- a/docs/models/ChatCompletionRequestDeveloperMessage.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.chat_completion_request_developer_message.ChatCompletionRequestDeveloperMessage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | 
------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `developer`. | must be one of ["developer", ] -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the developer message. | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestFunctionMessage.md b/docs/models/ChatCompletionRequestFunctionMessage.md deleted file mode 100644 index 14304683..00000000 --- a/docs/models/ChatCompletionRequestFunctionMessage.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.chat_completion_request_function_message.ChatCompletionRequestFunctionMessage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `function`. 
| must be one of ["function", ] -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the function message. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestMessage.md b/docs/models/ChatCompletionRequestMessage.md deleted file mode 100644 index 3fc709f0..00000000 --- a/docs/models/ChatCompletionRequestMessage.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.chat_completion_request_message.ChatCompletionRequestMessage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to 
README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestMessageContentPartAudio.md b/docs/models/ChatCompletionRequestMessageContentPartAudio.md deleted file mode 100644 index b91cfa31..00000000 --- a/docs/models/ChatCompletionRequestMessageContentPartAudio.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.chat_completion_request_message_content_part_audio.ChatCompletionRequestMessageContentPartAudio - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**input_audio** | [**InputAudio**](InputAudio.md) | [**InputAudio**](InputAudio.md) | | -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. Always `input_audio`. 
| must be one of ["input_audio", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestMessageContentPartFile.md b/docs/models/ChatCompletionRequestMessageContentPartFile.md deleted file mode 100644 index 3580290a..00000000 --- a/docs/models/ChatCompletionRequestMessageContentPartFile.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.chat_completion_request_message_content_part_file.ChatCompletionRequestMessageContentPartFile - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**file** | [**File**](File.md) | [**File**](File.md) | | -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. Always `file`. 
| must be one of ["file", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestMessageContentPartImage.md b/docs/models/ChatCompletionRequestMessageContentPartImage.md deleted file mode 100644 index 0efc771b..00000000 --- a/docs/models/ChatCompletionRequestMessageContentPartImage.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.chat_completion_request_message_content_part_image.ChatCompletionRequestMessageContentPartImage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**image_url** | [**ImageUrl**](ImageUrl.md) | [**ImageUrl**](ImageUrl.md) | | -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. 
| must be one of ["image_url", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestMessageContentPartRefusal.md b/docs/models/ChatCompletionRequestMessageContentPartRefusal.md deleted file mode 100644 index cda03882..00000000 --- a/docs/models/ChatCompletionRequestMessageContentPartRefusal.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.chat_completion_request_message_content_part_refusal.ChatCompletionRequestMessageContentPartRefusal - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The refusal message generated by the model. 
| -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. | must be one of ["refusal", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestMessageContentPartText.md b/docs/models/ChatCompletionRequestMessageContentPartText.md deleted file mode 100644 index f973e48b..00000000 --- a/docs/models/ChatCompletionRequestMessageContentPartText.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.chat_completion_request_message_content_part_text.ChatCompletionRequestMessageContentPartText - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**text** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | The text content. | -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. | must be one of ["text", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestSystemMessage.md b/docs/models/ChatCompletionRequestSystemMessage.md deleted file mode 100644 index 89df8eb9..00000000 --- a/docs/models/ChatCompletionRequestSystemMessage.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.chat_completion_request_system_message.ChatCompletionRequestSystemMessage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `system`. | must be one of ["system", ] -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the system message. | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An optional name for the participant. Provides the model information to differentiate between participants of the same role. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestSystemMessageContentPart.md b/docs/models/ChatCompletionRequestSystemMessageContentPart.md deleted file mode 100644 index d28d1965..00000000 --- a/docs/models/ChatCompletionRequestSystemMessageContentPart.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.chat_completion_request_system_message_content_part.ChatCompletionRequestSystemMessageContentPart - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestToolMessage.md b/docs/models/ChatCompletionRequestToolMessage.md deleted file mode 100644 index 0ae3cadc..00000000 --- a/docs/models/ChatCompletionRequestToolMessage.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.chat_completion_request_tool_message.ChatCompletionRequestToolMessage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `tool`. | must be one of ["tool", ] -**tool_call_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call that this message is responding to. 
| -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the tool message. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestToolMessageContentPart.md b/docs/models/ChatCompletionRequestToolMessageContentPart.md deleted file mode 100644 index 1f254fac..00000000 --- a/docs/models/ChatCompletionRequestToolMessageContentPart.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.chat_completion_request_tool_message_content_part.ChatCompletionRequestToolMessageContentPart - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestUserMessage.md b/docs/models/ChatCompletionRequestUserMessage.md deleted file mode 100644 index e48395ec..00000000 --- a/docs/models/ChatCompletionRequestUserMessage.md +++ /dev/null @@ 
-1,17 +0,0 @@ -# launch.api_client.model.chat_completion_request_user_message.ChatCompletionRequestUserMessage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `user`. | must be one of ["user", ] -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the user message. | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionRequestUserMessageContentPart.md b/docs/models/ChatCompletionRequestUserMessageContentPart.md deleted file mode 100644 index ec541402..00000000 --- a/docs/models/ChatCompletionRequestUserMessageContentPart.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.chat_completion_request_user_message_content_part.ChatCompletionRequestUserMessageContentPart - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionResponseMessage.md b/docs/models/ChatCompletionResponseMessage.md deleted file mode 100644 index 515ccbeb..00000000 --- a/docs/models/ChatCompletionResponseMessage.md +++ /dev/null @@ -1,21 +0,0 @@ -# launch.api_client.model.chat_completion_response_message.ChatCompletionResponseMessage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the author of this message. | must be one of ["assistant", ] -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the message. | [optional] -**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The refusal message generated by the model. | [optional] -**tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tool calls generated by the model, such as function calls. 
| [optional] -**annotations** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Annotations for the message, when applicable, as when using the [web search tool](/docs/guides/tools-web-search?api-mode=chat). | [optional] -**function_call** | [**FunctionCall**](FunctionCall.md) | [**FunctionCall**](FunctionCall.md) | Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. | [optional] -**audio** | [**Audio1**](Audio1.md) | [**Audio1**](Audio1.md) | If the audio output modality is requested, this object contains data about the audio response from the model. [Learn more](/docs/guides/audio). | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionStreamOptions.md b/docs/models/ChatCompletionStreamOptions.md deleted file mode 100644 index 97279a28..00000000 --- a/docs/models/ChatCompletionStreamOptions.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.chat_completion_stream_options.ChatCompletionStreamOptions - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**include_usage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. **NOTE:** If the stream is interrupted, you may not receive the final usage chunk which contains the total token usage for the request. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionStreamResponseDelta.md b/docs/models/ChatCompletionStreamResponseDelta.md deleted file mode 100644 index c36d2c02..00000000 --- a/docs/models/ChatCompletionStreamResponseDelta.md +++ /dev/null @@ -1,19 +0,0 @@ -# launch.api_client.model.chat_completion_stream_response_delta.ChatCompletionStreamResponseDelta - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the chunk message. | [optional] -**function_call** | [**FunctionCall2**](FunctionCall2.md) | [**FunctionCall2**](FunctionCall2.md) | Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. | [optional] -**tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the author of this message. | [optional] must be one of ["developer", "system", "user", "assistant", "tool", ] -**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The refusal message generated by the model. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionTokenLogprob.md b/docs/models/ChatCompletionTokenLogprob.md deleted file mode 100644 index edb7ebf3..00000000 --- a/docs/models/ChatCompletionTokenLogprob.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.chat_completion_token_logprob.ChatCompletionTokenLogprob - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**top_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. 
| -**logprob** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. | -**bytes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. | -**token** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The token. 
| -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionTool.md b/docs/models/ChatCompletionTool.md deleted file mode 100644 index 4e3d657a..00000000 --- a/docs/models/ChatCompletionTool.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.chat_completion_tool.ChatCompletionTool - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**function** | [**FunctionObject**](FunctionObject.md) | [**FunctionObject**](FunctionObject.md) | | -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the tool. Currently, only `function` is supported. 
| must be one of ["function", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionToolChoiceOption.md b/docs/models/ChatCompletionToolChoiceOption.md deleted file mode 100644 index 4f1d2f7d..00000000 --- a/docs/models/ChatCompletionToolChoiceOption.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.chat_completion_tool_choice_option.ChatCompletionToolChoiceOption - -Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionV2Request.md b/docs/models/ChatCompletionV2Request.md deleted file mode 100644 index 79ae9210..00000000 --- a/docs/models/ChatCompletionV2Request.md +++ /dev/null @@ -1,72 +0,0 @@ -# launch.api_client.model.chat_completion_v2_request.ChatCompletionV2Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**messages** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of messages comprising the conversation so far. Depending on the [model](/docs/models) you use, different message types (modalities) are supported, like [text](/docs/guides/text-generation), [images](/docs/guides/vision), and [audio](/docs/guides/audio). 
| -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the model to use. | -**best_of** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of output sequences that are generated from the prompt. From these `best_of` sequences, the top `n` sequences are returned. `best_of` must be greater than or equal to `n`. This is treated as the beam width when `use_beam_search` is True. By default, `best_of` is set to `n`. | [optional] -**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the number of top tokens to consider. -1 means consider all tokens. | [optional] -**min_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that represents the minimum probability for a token to be considered, relative to the probability of the most likely token. Must be in [0, 1]. Set to 0 to disable this. | [optional] -**use_beam_search** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use beam search for sampling. 
| [optional] -**length_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes sequences based on their length. Used in beam search. | [optional] -**repetition_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. Values > 1 encourage the model to use new tokens, while values < 1 encourage the model to repeat tokens. | [optional] -**early_stopping** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the stopping condition for beam search. It accepts the following values: `True`, where the generation stops as soon as there are `best_of` complete candidates; `False`, where an heuristic is applied and the generation stops when is it very unlikely to find better candidates; `\"never\"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). | [optional] -**stop_token_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of tokens that stop the generation when they are generated. The returned output will contain the stop tokens unless the stop tokens are special tokens. 
| [optional] -**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to include the stop strings in output text. Defaults to False. | [optional] -**ignore_eos** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. | [optional] -**min_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Minimum number of tokens to generate per output sequence before EOS or stop_token_ids can be generated | [optional] -**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to skip special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true -**spaces_between_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to add spaces between special tokens in the output. Only supported in vllm. 
| [optional] if omitted the server will use the default value of true -**echo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, the new message will be prepended with the last message if they belong to the same role. | [optional] -**add_generation_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, the generation prompt will be added to the chat template. This is a parameter used by chat template in tokenizer config of the model. | [optional] -**continue_final_message** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. The model will continue this message rather than starting a new one. This allows you to \"prefill\" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`. | [optional] -**add_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, special tokens (e.g. BOS) will be added to the prompt on top of what is added by the chat template. For most models, the chat template takes care of adding the special tokens so this should be set to false (as is the default). 
| [optional] -**documents** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing \"title\" and \"text\" keys. | [optional] -**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this conversion. As of transformers v4.44, default chat template is no longer allowed, so you must provide a chat template if the model's tokenizer does not define one and no override template is given | [optional] -**chat_template_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Additional kwargs to pass to the template renderer. Will be accessible by the chat template. | [optional] -**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | JSON schema for guided decoding. Only supported in vllm. 
| [optional] -**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex for guided decoding. Only supported in vllm. | [optional] -**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choices for guided decoding. Only supported in vllm. | [optional] -**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Context-free grammar for guided decoding. Only supported in vllm. | [optional] -**guided_decoding_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer' | [optional] -**guided_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default whitespace pattern for guided json decoding. 
| [optional] -**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling. | [optional] -**metadata** | [**Metadata**](Metadata.md) | [**Metadata**](Metadata.md) | | [optional] -**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] if omitted the server will use the default value of 1 -**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
| [optional] if omitted the server will use the default value of 1 -**user** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] -**service_tier** | [**ServiceTier**](ServiceTier.md) | [**ServiceTier**](ServiceTier.md) | | [optional] -**modalities** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` | [optional] -**reasoning_effort** | [**ReasoningEffort**](ReasoningEffort.md) | [**ReasoningEffort**](ReasoningEffort.md) | | [optional] -**max_completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). 
| [optional] -**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. | [optional] if omitted the server will use the default value of 0 -**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. | [optional] if omitted the server will use the default value of 0 -**web_search_options** | [**WebSearchOptions**](WebSearchOptions.md) | [**WebSearchOptions**](WebSearchOptions.md) | This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](/docs/guides/tools-web-search?api-mode=chat). | [optional] -**top_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
| [optional] -**response_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An object specifying the format that the model must output. Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. | [optional] -**audio** | [**Audio2**](Audio2.md) | [**Audio2**](Audio2.md) | Parameters for audio output. Required when audio output is requested with `modalities: [\"audio\"]`. [Learn more](/docs/guides/audio). | [optional] -**store** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether or not to store the output of this chat completion request for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products. | [optional] if omitted the server will use the default value of false -**stream** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, partial message deltas will be sent. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). | [optional] if omitted the server will use the default value of false -**stop** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. | [optional] -**logit_bias** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. | [optional] -**logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to return log probabilities of the output tokens or not. 
If true, returns the log probabilities of each output token returned in the `content` of `message`. | [optional] if omitted the server will use the default value of false -**max_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o-series models](/docs/guides/reasoning). | [optional] -**n** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. | [optional] if omitted the server will use the default value of 1 -**prediction** | [**PredictionContent**](PredictionContent.md) | [**PredictionContent**](PredictionContent.md) | Configuration for a [Predicted Output](/docs/guides/predicted-outputs), which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content. | [optional] -**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This feature is in Beta. 
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. | [optional] -**stream_options** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] -**tools** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. | [optional] -**tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. 
| [optional] -**parallel_tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. | [optional] -**function_call** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. | [optional] -**functions** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ChatCompletionV2StreamErrorChunk.md b/docs/models/ChatCompletionV2StreamErrorChunk.md deleted file mode 100644 index 339fe97f..00000000 --- a/docs/models/ChatCompletionV2StreamErrorChunk.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.chat_completion_v2_stream_error_chunk.ChatCompletionV2StreamErrorChunk - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**error** | [**StreamError**](StreamError.md) | [**StreamError**](StreamError.md) | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to 
README]](../../README.md) - diff --git a/docs/models/Choice.md b/docs/models/Choice.md deleted file mode 100644 index f6e21930..00000000 --- a/docs/models/Choice.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.choice.Choice - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**finish_reason** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. | must be one of ["stop", "length", "tool_calls", "content_filter", "function_call", ] -**index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The index of the choice in the list of choices. 
| -**message** | [**ChatCompletionResponseMessage**](ChatCompletionResponseMessage.md) | [**ChatCompletionResponseMessage**](ChatCompletionResponseMessage.md) | | -**logprobs** | [**Logprobs**](Logprobs.md) | [**Logprobs**](Logprobs.md) | Log probability information for the choice. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Choice1.md b/docs/models/Choice1.md deleted file mode 100644 index 74991b30..00000000 --- a/docs/models/Choice1.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.choice1.Choice1 - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**finish_reason** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. | must be one of ["stop", "length", "tool_calls", "content_filter", "function_call", ] -**delta** | [**ChatCompletionStreamResponseDelta**](ChatCompletionStreamResponseDelta.md) | [**ChatCompletionStreamResponseDelta**](ChatCompletionStreamResponseDelta.md) | | -**index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The index of the choice in the list of choices. | -**logprobs** | [**Logprobs**](Logprobs.md) | [**Logprobs**](Logprobs.md) | Log probability information for the choice. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Choice2.md b/docs/models/Choice2.md deleted file mode 100644 index 36306adf..00000000 --- a/docs/models/Choice2.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.choice2.Choice2 - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**finish_reason** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. 
| must be one of ["stop", "length", "content_filter", ] -**index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**text** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**logprobs** | [**Logprobs2**](Logprobs2.md) | [**Logprobs2**](Logprobs2.md) | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CloneModelBundleV1Request.md b/docs/models/CloneModelBundleV1Request.md deleted file mode 100644 index 5ca434da..00000000 --- a/docs/models/CloneModelBundleV1Request.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.clone_model_bundle_v1_request.CloneModelBundleV1Request - -Request object for cloning a Model Bundle from another one. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for cloning a Model Bundle from another one. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**original_model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**new_app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CloneModelBundleV2Request.md b/docs/models/CloneModelBundleV2Request.md deleted file mode 100644 index 3b09c946..00000000 --- a/docs/models/CloneModelBundleV2Request.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.clone_model_bundle_v2_request.CloneModelBundleV2Request - -Request object for cloning a Model Bundle from another one. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for cloning a Model Bundle from another one. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**original_model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**new_app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CloudpickleArtifactFlavor.md b/docs/models/CloudpickleArtifactFlavor.md deleted file mode 100644 index aee1338a..00000000 --- a/docs/models/CloudpickleArtifactFlavor.md +++ /dev/null @@ -1,23 +0,0 @@ -# launch.api_client.model.cloudpickle_artifact_flavor.CloudpickleArtifactFlavor - -This is the entity-layer 
class for the Model Bundle flavor of a cloudpickle artifact. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the Model Bundle flavor of a cloudpickle artifact. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["cloudpickle_artifact", ] -**requirements** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**load_model_fn** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, 
BoolClass, NoneClass, tuple, bytes, FileIO | | -**load_predict_fn** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionOutput.md b/docs/models/CompletionOutput.md deleted file mode 100644 index a384f964..00000000 --- a/docs/models/CompletionOutput.md +++ /dev/null @@ -1,20 +0,0 @@ -# launch.api_client.model.completion_output.CompletionOutput - -Represents the output of a completion request to a model. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Represents the output of a completion request to a model. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**num_completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**text** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**num_prompt_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionStreamOutput.md b/docs/models/CompletionStreamOutput.md deleted file mode 100644 index 47db1595..00000000 --- a/docs/models/CompletionStreamOutput.md +++ /dev/null @@ -1,19 +0,0 @@ -# 
launch.api_client.model.completion_stream_output.CompletionStreamOutput - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**finished** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**text** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**num_prompt_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**token** | [**TokenOutput**](TokenOutput.md) | [**TokenOutput**](TokenOutput.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, 
decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionStreamV1Request.md b/docs/models/CompletionStreamV1Request.md deleted file mode 100644 index 15a58483..00000000 --- a/docs/models/CompletionStreamV1Request.md +++ /dev/null @@ -1,31 +0,0 @@ -# launch.api_client.model.completion_stream_v1_request.CompletionStreamV1Request - -Request object for a stream prompt completion task. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for a stream prompt completion task. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**max_new_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**stop_sequences** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**return_token_log_probs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false -**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, 
bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionStreamV1Response.md b/docs/models/CompletionStreamV1Response.md deleted file mode 100644 index 4e784563..00000000 --- a/docs/models/CompletionStreamV1Response.md +++ /dev/null @@ -1,19 +0,0 @@ -# launch.api_client.model.completion_stream_v1_response.CompletionStreamV1Response - -Error of the response (if any). - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Error of the response (if any). 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**request_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**output** | [**CompletionStreamOutput**](CompletionStreamOutput.md) | [**CompletionStreamOutput**](CompletionStreamOutput.md) | | [optional] -**error** | [**StreamError**](StreamError.md) | [**StreamError**](StreamError.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionSyncV1Request.md b/docs/models/CompletionSyncV1Request.md deleted file mode 100644 index 540f650d..00000000 --- a/docs/models/CompletionSyncV1Request.md +++ /dev/null @@ -1,31 +0,0 @@ -# launch.api_client.model.completion_sync_v1_request.CompletionSyncV1Request - -Request object for a synchronous prompt completion task. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for a synchronous prompt completion task. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**max_new_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**stop_sequences** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**return_token_log_probs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false -**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, 
bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionSyncV1Response.md b/docs/models/CompletionSyncV1Response.md deleted file mode 100644 index 7d977144..00000000 --- a/docs/models/CompletionSyncV1Response.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.completion_sync_v1_response.CompletionSyncV1Response - -Response object for a synchronous prompt completion. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for a synchronous prompt completion. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**request_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**output** | [**CompletionOutput**](CompletionOutput.md) | [**CompletionOutput**](CompletionOutput.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionTokensDetails.md b/docs/models/CompletionTokensDetails.md deleted file mode 100644 index 631527f5..00000000 --- a/docs/models/CompletionTokensDetails.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.completion_tokens_details.CompletionTokensDetails - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**accepted_prediction_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion. | [optional] if omitted the server will use the default value of 0 -**audio_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Audio input tokens generated by the model. | [optional] if omitted the server will use the default value of 0 -**reasoning_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokens generated by the model for reasoning. | [optional] if omitted the server will use the default value of 0 -**rejected_prediction_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | When using Predicted Outputs, the number of tokens in the prediction that did not appear in the completion. However, like reasoning tokens, these tokens are still counted in the total completion tokens for purposes of billing, output, and context window limits. 
| [optional] if omitted the server will use the default value of 0 -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionUsage.md b/docs/models/CompletionUsage.md deleted file mode 100644 index 7902b33d..00000000 --- a/docs/models/CompletionUsage.md +++ /dev/null @@ -1,19 +0,0 @@ -# launch.api_client.model.completion_usage.CompletionUsage - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tokens in the generated completion. | -**prompt_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tokens in the prompt. 
| -**total_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Total number of tokens used in the request (prompt + completion). | -**completion_tokens_details** | [**CompletionTokensDetails**](CompletionTokensDetails.md) | [**CompletionTokensDetails**](CompletionTokensDetails.md) | Breakdown of tokens used in a completion. | [optional] -**prompt_tokens_details** | [**PromptTokensDetails**](PromptTokensDetails.md) | [**PromptTokensDetails**](PromptTokensDetails.md) | Breakdown of tokens used in the prompt. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionV2Request.md b/docs/models/CompletionV2Request.md deleted file mode 100644 index 1e29a3d8..00000000 --- a/docs/models/CompletionV2Request.md +++ /dev/null @@ -1,52 +0,0 @@ -# launch.api_client.model.completion_v2_request.CompletionV2Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | 
------------- | ------------- | ------------- | ------------- -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the model to use. | -**prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. | -**best_of** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
| [optional] if omitted the server will use the default value of 1 -**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the number of top tokens to consider. -1 means consider all tokens. | [optional] -**min_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that represents the minimum probability for a token to be considered, relative to the probability of the most likely token. Must be in [0, 1]. Set to 0 to disable this. | [optional] -**use_beam_search** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use beam search for sampling. | [optional] -**length_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes sequences based on their length. Used in beam search. | [optional] -**repetition_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. 
Values > 1 encourage the model to use new tokens, while values < 1 encourage the model to repeat tokens. | [optional] -**early_stopping** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the stopping condition for beam search. It accepts the following values: `True`, where the generation stops as soon as there are `best_of` complete candidates; `False`, where an heuristic is applied and the generation stops when is it very unlikely to find better candidates; `\"never\"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). | [optional] -**stop_token_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of tokens that stop the generation when they are generated. The returned output will contain the stop tokens unless the stop tokens are special tokens. | [optional] -**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to include the stop strings in output text. | [optional] -**ignore_eos** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. 
| [optional] -**min_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Minimum number of tokens to generate per output sequence before EOS or stop_token_ids can be generated | [optional] -**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to skip special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true -**spaces_between_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to add spaces between special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true -**add_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true (the default), special tokens (e.g. BOS) will be added to the prompt. | [optional] -**response_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Similar to chat completion, this parameter specifies the format of output. 
Only {'type': 'json_object'} or {'type': 'text' } is supported. | [optional] -**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | JSON schema for guided decoding. Only supported in vllm. | [optional] -**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex for guided decoding. Only supported in vllm. | [optional] -**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choices for guided decoding. Only supported in vllm. | [optional] -**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Context-free grammar for guided decoding. Only supported in vllm. | [optional] -**guided_decoding_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default guided decoding backend of the server for this specific request. 
If set, must be either 'outlines' / 'lm-format-enforcer' | [optional] -**guided_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default whitespace pattern for guided json decoding. | [optional] -**echo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Echo back the prompt in addition to the completion | [optional] if omitted the server will use the default value of false -**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/text-generation) | [optional] if omitted the server will use the default value of 0 -**logit_bias** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. 
Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated. | [optional] -**logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. | [optional] -**max_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. | [optional] if omitted the server will use the default value of 16 -**n** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many completions to generate for each prompt. 
**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. | [optional] if omitted the server will use the default value of 1 -**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/text-generation) | [optional] if omitted the server will use the default value of 0 -**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. | [optional] -**stop** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
| [optional] -**stream** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, partial message deltas will be sent. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). | [optional] if omitted the server will use the default value of false -**stream_options** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] -**suffix** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The suffix that comes after a completion of inserted text. This parameter is only supported for `gpt-3.5-turbo-instruct`. | [optional] -**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
| [optional] if omitted the server will use the default value of 1 -**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | [optional] if omitted the server will use the default value of 1 -**user** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CompletionV2StreamErrorChunk.md b/docs/models/CompletionV2StreamErrorChunk.md deleted file mode 100644 index bb469573..00000000 --- a/docs/models/CompletionV2StreamErrorChunk.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.completion_v2_stream_error_chunk.CompletionV2StreamErrorChunk - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**error** | [**StreamError**](StreamError.md) | [**StreamError**](StreamError.md) | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git 
a/docs/models/Content.md b/docs/models/Content.md deleted file mode 100644 index 789d858e..00000000 --- a/docs/models/Content.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.content.Content - -An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Content1.md b/docs/models/Content1.md deleted file mode 100644 index dfab0eef..00000000 --- a/docs/models/Content1.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.content1.Content1 - -An array of content parts with a defined type. For developer messages, only type `text` is supported. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. For developer messages, only type `text` is supported. 
| - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Content2.md b/docs/models/Content2.md deleted file mode 100644 index 5d1c6ff3..00000000 --- a/docs/models/Content2.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.content2.Content2 - -An array of content parts with a defined type. For system messages, only type `text` is supported. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. For system messages, only type `text` is supported. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Content3.md b/docs/models/Content3.md deleted file mode 100644 index 6cb17b17..00000000 --- a/docs/models/Content3.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.content3.Content3 - -An array of content parts with a defined type. For tool messages, only type `text` is supported. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. For tool messages, only type `text` is supported. 
| - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Content4.md b/docs/models/Content4.md deleted file mode 100644 index 918af8f8..00000000 --- a/docs/models/Content4.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.content4.Content4 - -An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Content8.md b/docs/models/Content8.md deleted file mode 100644 index 9a82ed37..00000000 --- a/docs/models/Content8.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.content8.Content8 - -An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateAsyncTaskV1Response.md b/docs/models/CreateAsyncTaskV1Response.md deleted file mode 100644 index ccd80047..00000000 --- a/docs/models/CreateAsyncTaskV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.create_async_task_v1_response.CreateAsyncTaskV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | 
frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateBatchCompletionsV1ModelConfig.md b/docs/models/CreateBatchCompletionsV1ModelConfig.md deleted file mode 100644 index 0673637c..00000000 --- a/docs/models/CreateBatchCompletionsV1ModelConfig.md +++ /dev/null @@ -1,56 +0,0 @@ -# launch.api_client.model.create_batch_completions_v1_model_config.CreateBatchCompletionsV1ModelConfig - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the model to use. 
| -**max_model_len** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model context length, If unspecified, will be automatically derived from the model config | [optional] -**max_num_seqs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of sequences per iteration | [optional] -**enforce_eager** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal perforamnce and flexibility | [optional] -**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. | [optional] if omitted the server will use the default value of false -**pipeline_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of pipeline stages. Default to None. 
| [optional] -**tensor_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tensor parallel replicas. Default to None. | [optional] -**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights. | [optional] -**disable_log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable logging requests. Default to None. | [optional] -**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint | [optional] -**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call parser | [optional] -**enable_auto_tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable auto tool choice | [optional] -**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. * \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. * \"pt\" will load the weights in the pytorch bin format. * \"safetensors\" will load the weights in the safetensors format. * \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading. * \"dummy\" will initialize the weights with random values, which is mainly for profiling. * \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information. * \"bitsandbytes\" will load the weights using bitsandbytes quantization. 
| [optional] -**config_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'. | [optional] -**tokenizer_mode** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokenizer mode. 'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`. | [optional] -**limit_mm_per_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of data instances per modality per prompt. Only applicable for multimodal models. | [optional] -**max_num_batched_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of batched tokens per iteration | [optional] -**tokenizer** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name or path of the huggingface tokenizer to use. 
| [optional] -**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models. | [optional] -**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Random seed for the model. | [optional] -**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**code_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**rope_scaling** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum. | [optional] -**tokenizer_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm. | [optional] -**max_seq_len_to_capture** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode. | [optional] -**disable_sliding_window** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to disable sliding window. 
If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored. | [optional] -**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, skip initialization of tokenizer and detokenizer. | [optional] -**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. If not specified, the model name will be the same as `model`. | [optional] -**override_neuron_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. | [optional] -**mm_processor_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. 
| [optional] -**block_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of a cache block in number of tokens. | [optional] -**gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Fraction of GPU memory to use for the vLLM execution. | [optional] -**swap_space** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of the CPU swap space per GPU (in GiB). | [optional] -**cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. | [optional] -**num_gpu_blocks_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. 
| [optional] -**enable_prefix_caching** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enables automatic prefix caching. | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the checkpoint to load the model from. | [optional] -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Suggested number of shards to distribute the model. When not specified, will infer the number of shards based on model config. System may decide to use a different number than the given value. | [optional] if omitted the server will use the default value of 1 -**max_context_length** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum context length to use for the model. Defaults to the max allowed by the model. Deprecated in favor of max_model_len. | [optional] -**response_role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Role of the response in the conversation. Only supported in chat completions. 
| [optional] -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Labels to attach to the batch inference job. | [optional] if omitted the server will use the default value of {} -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateBatchCompletionsV1Request.md b/docs/models/CreateBatchCompletionsV1Request.md deleted file mode 100644 index b544eac0..00000000 --- a/docs/models/CreateBatchCompletionsV1Request.md +++ /dev/null @@ -1,31 +0,0 @@ -# launch.api_client.model.create_batch_completions_v1_request.CreateBatchCompletionsV1Request - -Request object for batch completions. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for batch completions. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_config** | [**CreateBatchCompletionsV1ModelConfig**](CreateBatchCompletionsV1ModelConfig.md) | [**CreateBatchCompletionsV1ModelConfig**](CreateBatchCompletionsV1ModelConfig.md) | | -**output_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the output file. The output file will be a JSON file of type List[CompletionOutput]. | -**input_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. | [optional] -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Labels to attach to the batch inference job. | [optional] if omitted the server will use the default value of {} -**data_parallelism** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of replicas to run the batch inference. More replicas are slower to schedule but faster to inference. 
| [optional] if omitted the server will use the default value of 1 -**max_runtime_sec** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum runtime of the batch inference in seconds. Default to one day. | [optional] if omitted the server will use the default value of 86400 -**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. | [optional] -**tool_config** | [**ToolConfig**](ToolConfig.md) | [**ToolConfig**](ToolConfig.md) | Configuration for tool use. NOTE: this config is highly experimental and signature will change significantly in future iterations. | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | CPUs to use for the batch inference. | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPUs to use for the batch inference. | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Amount of memory to use for the batch inference. 
| [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | GPU type to use for the batch inference. | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Storage to use for the batch inference. | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of nodes per worker for the batch inference. | [optional] -**content** | [**CreateBatchCompletionsV1RequestContent**](CreateBatchCompletionsV1RequestContent.md) | [**CreateBatchCompletionsV1RequestContent**](CreateBatchCompletionsV1RequestContent.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateBatchCompletionsV1RequestContent.md b/docs/models/CreateBatchCompletionsV1RequestContent.md deleted file mode 100644 index 92b4fd41..00000000 --- a/docs/models/CreateBatchCompletionsV1RequestContent.md +++ /dev/null @@ -1,24 +0,0 @@ -# launch.api_client.model.create_batch_completions_v1_request_content.CreateBatchCompletionsV1RequestContent - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | 
------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**max_new_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**prompts** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**stop_sequences** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**return_token_log_probs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false -**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateBatchCompletionsV1Response.md b/docs/models/CreateBatchCompletionsV1Response.md deleted file mode 100644 index 78970ff2..00000000 --- 
a/docs/models/CreateBatchCompletionsV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.create_batch_completions_v1_response.CreateBatchCompletionsV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateBatchCompletionsV2Request.md b/docs/models/CreateBatchCompletionsV2Request.md deleted file mode 100644 index 5209757f..00000000 --- a/docs/models/CreateBatchCompletionsV2Request.md +++ /dev/null @@ -1,31 +0,0 @@ -# launch.api_client.model.create_batch_completions_v2_request.CreateBatchCompletionsV2Request - -Request object for batch completions. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for batch completions. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model configuration for the batch inference. Hardware configurations are inferred. | -**output_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the output file. The output file will be a JSON file of type List[CompletionOutput]. | -**input_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. | [optional] -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Labels to attach to the batch inference job. 
| [optional] if omitted the server will use the default value of {} -**data_parallelism** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of replicas to run the batch inference. More replicas are slower to schedule but faster to inference. | [optional] if omitted the server will use the default value of 1 -**max_runtime_sec** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum runtime of the batch inference in seconds. Default to one day. | [optional] if omitted the server will use the default value of 86400 -**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. | [optional] -**tool_config** | [**ToolConfig**](ToolConfig.md) | [**ToolConfig**](ToolConfig.md) | Configuration for tool use. NOTE: this config is highly experimental and signature will change significantly in future iterations. | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | CPUs to use for the batch inference. 
| [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPUs to use for the batch inference. | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Amount of memory to use for the batch inference. | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | GPU type to use for the batch inference. | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Storage to use for the batch inference. | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of nodes per worker for the batch inference. | [optional] -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Either `input_data_path` or `content` needs to be provided. When input_data_path is provided, the input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateBatchJobResourceRequests.md b/docs/models/CreateBatchJobResourceRequests.md deleted file mode 100644 index 766c1630..00000000 --- a/docs/models/CreateBatchJobResourceRequests.md +++ /dev/null @@ -1,22 +0,0 @@ -# launch.api_client.model.create_batch_job_resource_requests.CreateBatchJobResourceRequests - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**concurrent_requests_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateBatchJobV1Request.md 
b/docs/models/CreateBatchJobV1Request.md deleted file mode 100644 index 2874eaf8..00000000 --- a/docs/models/CreateBatchJobV1Request.md +++ /dev/null @@ -1,20 +0,0 @@ -# launch.api_client.model.create_batch_job_v1_request.CreateBatchJobV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**resource_requests** | [**CreateBatchJobResourceRequests**](CreateBatchJobResourceRequests.md) | [**CreateBatchJobResourceRequests**](CreateBatchJobResourceRequests.md) | | -**serialization_format** | [**BatchJobSerializationFormat**](BatchJobSerializationFormat.md) | [**BatchJobSerializationFormat**](BatchJobSerializationFormat.md) | | -**input_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**timeout_seconds** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 43200.0 -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateBatchJobV1Response.md b/docs/models/CreateBatchJobV1Response.md deleted file mode 100644 index 2591a24f..00000000 --- a/docs/models/CreateBatchJobV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.create_batch_job_v1_response.CreateBatchJobV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateChatCompletionResponse.md b/docs/models/CreateChatCompletionResponse.md deleted file mode 100644 index 5011124e..00000000 --- a/docs/models/CreateChatCompletionResponse.md +++ /dev/null @@ -1,22 +0,0 @@ -# launch.api_client.model.create_chat_completion_response.CreateChatCompletionResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**created** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Unix timestamp (in seconds) of when the chat completion was created. | -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model used for the chat completion. 
| -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier for the chat completion. | -**choices** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of chat completion choices. Can be more than one if `n` is greater than 1. | -**object** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The object type, which is always `chat.completion`. | must be one of ["chat.completion", ] -**service_tier** | [**ServiceTier**](ServiceTier.md) | [**ServiceTier**](ServiceTier.md) | | [optional] -**system_fingerprint** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. 
| [optional] -**usage** | [**CompletionUsage**](CompletionUsage.md) | [**CompletionUsage**](CompletionUsage.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateChatCompletionStreamResponse.md b/docs/models/CreateChatCompletionStreamResponse.md deleted file mode 100644 index ff4b84df..00000000 --- a/docs/models/CreateChatCompletionStreamResponse.md +++ /dev/null @@ -1,22 +0,0 @@ -# launch.api_client.model.create_chat_completion_stream_response.CreateChatCompletionStreamResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**created** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. 
| -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model to generate the completion. | -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier for the chat completion. Each chunk has the same ID. | -**choices** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {\"include_usage\": true}`. | -**object** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The object type, which is always `chat.completion.chunk`. | must be one of ["chat.completion.chunk", ] -**service_tier** | [**ServiceTier**](ServiceTier.md) | [**ServiceTier**](ServiceTier.md) | | [optional] -**system_fingerprint** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This fingerprint represents the backend configuration that the model runs with. 
Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. | [optional] -**usage** | [**CompletionUsage**](CompletionUsage.md) | [**CompletionUsage**](CompletionUsage.md) | An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request. When present, it contains a null value **except for the last chunk** which contains the token usage statistics for the entire request. **NOTE:** If the stream is interrupted or cancelled, you may not receive the final usage chunk which contains the total token usage for the request. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateCompletionResponse.md b/docs/models/CreateCompletionResponse.md deleted file mode 100644 index 9f64ce59..00000000 --- a/docs/models/CreateCompletionResponse.md +++ /dev/null @@ -1,21 +0,0 @@ -# launch.api_client.model.create_completion_response.CreateCompletionResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | 
------------- | ------------- -**created** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Unix timestamp (in seconds) of when the completion was created. | -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model used for completion. | -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier for the completion. | -**choices** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The list of completion choices the model generated for the input prompt. | -**object** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The object type, which is always \"text_completion\" | must be one of ["text_completion", ] -**system_fingerprint** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This fingerprint represents the backend configuration that the model runs with. 
Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. | [optional] -**usage** | [**CompletionUsage**](CompletionUsage.md) | [**CompletionUsage**](CompletionUsage.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateDeepSpeedModelEndpointRequest.md b/docs/models/CreateDeepSpeedModelEndpointRequest.md deleted file mode 100644 index 7e5db065..00000000 --- a/docs/models/CreateDeepSpeedModelEndpointRequest.md +++ /dev/null @@ -1,44 +0,0 @@ -# launch.api_client.model.create_deep_speed_model_endpoint_request.CreateDeepSpeedModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_name** | dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false -**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 -**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["deepspeed", ] if omitted the server will use the default value of deepspeed -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateDockerImageBatchJobBundleV1Request.md b/docs/models/CreateDockerImageBatchJobBundleV1Request.md deleted file mode 100644 index bfb65c5f..00000000 --- a/docs/models/CreateDockerImageBatchJobBundleV1Request.md +++ /dev/null @@ -1,22 +0,0 @@ -# launch.api_client.model.create_docker_image_batch_job_bundle_v1_request.CreateDockerImageBatchJobBundleV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**image_repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, 
FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of {} -**mount_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**resource_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of {} -**public** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | 
[optional] if omitted the server will use the default value of false -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateDockerImageBatchJobBundleV1Response.md b/docs/models/CreateDockerImageBatchJobBundleV1Response.md deleted file mode 100644 index ea485921..00000000 --- a/docs/models/CreateDockerImageBatchJobBundleV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.create_docker_image_batch_job_bundle_v1_response.CreateDockerImageBatchJobBundleV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**docker_image_batch_job_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, 
decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateDockerImageBatchJobResourceRequests.md b/docs/models/CreateDockerImageBatchJobResourceRequests.md deleted file mode 100644 index f38701b3..00000000 --- a/docs/models/CreateDockerImageBatchJobResourceRequests.md +++ /dev/null @@ -1,20 +0,0 @@ -# launch.api_client.model.create_docker_image_batch_job_resource_requests.CreateDockerImageBatchJobResourceRequests - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateDockerImageBatchJobV1Request.md b/docs/models/CreateDockerImageBatchJobV1Request.md deleted file mode 100644 index 69232d1c..00000000 --- a/docs/models/CreateDockerImageBatchJobV1Request.md +++ /dev/null @@ -1,20 +0,0 @@ -# launch.api_client.model.create_docker_image_batch_job_v1_request.CreateDockerImageBatchJobV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | 
Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**docker_image_batch_job_bundle_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**docker_image_batch_job_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**job_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**resource_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of {} -**override_job_max_runtime_s** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateDockerImageBatchJobV1Response.md b/docs/models/CreateDockerImageBatchJobV1Response.md deleted file mode 100644 index c5bcef34..00000000 --- a/docs/models/CreateDockerImageBatchJobV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.create_docker_image_batch_job_v1_response.CreateDockerImageBatchJobV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API 
list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateFineTuneRequest.md b/docs/models/CreateFineTuneRequest.md deleted file mode 100644 index 5e0a72b3..00000000 --- a/docs/models/CreateFineTuneRequest.md +++ /dev/null @@ -1,20 +0,0 @@ -# launch.api_client.model.create_fine_tune_request.CreateFineTuneRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**training_file** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**hyperparameters** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**validation_file** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] 
-**suffix** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**wandb_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateFineTuneResponse.md b/docs/models/CreateFineTuneResponse.md deleted file mode 100644 index b1723a07..00000000 --- a/docs/models/CreateFineTuneResponse.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.create_fine_tune_response.CreateFineTuneResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, 
None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateLLMModelEndpointV1Request.md b/docs/models/CreateLLMModelEndpointV1Request.md deleted file mode 100644 index 9b73d5c3..00000000 --- a/docs/models/CreateLLMModelEndpointV1Request.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.create_llm_model_endpoint_v1_request.CreateLLMModelEndpointV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateLLMModelEndpointV1Response.md b/docs/models/CreateLLMModelEndpointV1Response.md deleted file mode 100644 index 521eecce..00000000 --- a/docs/models/CreateLLMModelEndpointV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.create_llm_model_endpoint_v1_response.CreateLLMModelEndpointV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | 
------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**endpoint_creation_task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateLightLLMModelEndpointRequest.md b/docs/models/CreateLightLLMModelEndpointRequest.md deleted file mode 100644 index 501943ab..00000000 --- a/docs/models/CreateLightLLMModelEndpointRequest.md +++ /dev/null @@ -1,44 +0,0 @@ -# launch.api_client.model.create_light_llm_model_endpoint_request.CreateLightLLMModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys 
-Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**quantize** | 
[**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false -**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server 
will use the default value of 1 -**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["lightllm", ] if omitted the server will use the default value of lightllm -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateModelBundleV1Request.md b/docs/models/CreateModelBundleV1Request.md deleted file mode 100644 index a8bc8ff2..00000000 --- a/docs/models/CreateModelBundleV1Request.md +++ /dev/null @@ -1,24 +0,0 @@ -# launch.api_client.model.create_model_bundle_v1_request.CreateModelBundleV1Request - -Request object for creating a Model Bundle. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for creating a Model Bundle. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**requirements** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**packaging_type** | [**ModelBundlePackagingType**](ModelBundlePackagingType.md) | [**ModelBundlePackagingType**](ModelBundlePackagingType.md) | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**env_params** | [**ModelBundleEnvironmentParams**](ModelBundleEnvironmentParams.md) | [**ModelBundleEnvironmentParams**](ModelBundleEnvironmentParams.md) | | -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**app_config** | dict, frozendict.frozendict, 
str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**schema_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateModelBundleV1Response.md b/docs/models/CreateModelBundleV1Response.md deleted file mode 100644 index e23c7ee6..00000000 --- a/docs/models/CreateModelBundleV1Response.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.create_model_bundle_v1_response.CreateModelBundleV1Response - -Response object for creating a Model Bundle. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for creating a Model Bundle. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateModelBundleV2Request.md b/docs/models/CreateModelBundleV2Request.md deleted file mode 100644 index e504787d..00000000 --- a/docs/models/CreateModelBundleV2Request.md +++ /dev/null @@ -1,20 +0,0 @@ -# launch.api_client.model.create_model_bundle_v2_request.CreateModelBundleV2Request - -Request object for creating a Model Bundle. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for creating a Model Bundle. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**schema_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateModelBundleV2Response.md b/docs/models/CreateModelBundleV2Response.md deleted file mode 100644 index dd6408a5..00000000 --- a/docs/models/CreateModelBundleV2Response.md +++ /dev/null @@ -1,17 +0,0 @@ -# 
launch.api_client.model.create_model_bundle_v2_response.CreateModelBundleV2Response - -Response object for creating a Model Bundle. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for creating a Model Bundle. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateModelEndpointV1Request.md b/docs/models/CreateModelEndpointV1Request.md deleted file mode 100644 index 7c299b20..00000000 --- a/docs/models/CreateModelEndpointV1Request.md +++ /dev/null @@ -1,37 +0,0 @@ -# launch.api_client.model.create_model_endpoint_v1_request.CreateModelEndpointV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**endpoint_type** | [**ModelEndpointType**](ModelEndpointType.md) | [**ModelEndpointType**](ModelEndpointType.md) | | -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, 
FileIO | | [optional] if omitted the server will use the default value of 1 -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**concurrent_requests_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, 
BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateModelEndpointV1Response.md b/docs/models/CreateModelEndpointV1Response.md deleted file mode 100644 index ca6bdde5..00000000 --- a/docs/models/CreateModelEndpointV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.create_model_endpoint_v1_response.CreateModelEndpointV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**endpoint_creation_task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateSGLangModelEndpointRequest.md b/docs/models/CreateSGLangModelEndpointRequest.md deleted file mode 100644 index 8db17a8d..00000000 --- a/docs/models/CreateSGLangModelEndpointRequest.md +++ /dev/null @@ -1,132 +0,0 @@ -# launch.api_client.model.create_sg_lang_model_endpoint_request.CreateSGLangModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | 
-**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** 
| dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false -**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 -**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync -**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. | [optional] if omitted the server will use the default value of false -**tp_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tensor parallel size. | [optional] -**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, skip init tokenizer and pass input_ids in generate request | [optional] -**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. | [optional] -**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. 
| [optional] -**kv_cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. \"auto\" will use model data type. | [optional] -**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the JSON file containing the KV cache scaling factors. | [optional] -**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The quantization method. | [optional] -**context_length** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model's maximum context length. | [optional] -**device** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The device type. | [optional] -**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Override the model name returned by the v1/models endpoint in OpenAI API server. 
| [optional] -**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The builtin chat template name or path of the chat template file. | [optional] -**is_embedding** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use a CausalLM as an embedding model. | [optional] -**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. | [optional] -**mem_fraction_static** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The fraction of the memory used for static allocation. | [optional] -**max_running_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of running requests. | [optional] -**max_total_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in the memory pool. 
| [optional] -**chunked_prefill_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in a chunk for the chunked prefill. | [optional] -**max_prefill_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in a prefill batch. | [optional] -**schedule_policy** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The scheduling policy of the requests. | [optional] -**schedule_conservativeness** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How conservative the schedule policy is. 
| [optional] -**cpu_offload_gb** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many GBs of RAM to reserve for CPU offloading | [optional] -**prefill_only_one_req** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, we only prefill one request at one prefill batch | [optional] -**stream_interval** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The interval for streaming in terms of the token length. | [optional] -**random_seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The random seed. | [optional] -**constrained_json_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex pattern for syntactic whitespaces allowed in JSON constrained output. | [optional] -**watchdog_timeout** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set watchdog timeout in seconds. 
| [optional] -**download_dir** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model download directory. | [optional] -**base_gpu_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The base GPU ID to start allocating GPUs from. | [optional] -**log_level** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The logging level of all loggers. | [optional] -**log_level_http** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The logging level of HTTP server. | [optional] -**log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Log the inputs and outputs of all requests. | [optional] -**show_time_cost** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Show time cost of custom marks. 
| [optional] -**enable_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable log prometheus metrics. | [optional] -**decode_log_interval** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The log interval of decode batch. | [optional] -**api_key** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set API key of the server. | [optional] -**file_storage_pth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the file storage in backend. | [optional] -**enable_cache_report** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Return number of cached tokens in usage.prompt_tokens_details. | [optional] -**data_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The data parallelism size. 
| [optional] -**load_balance_method** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The load balancing strategy for data parallelism. | [optional] -**expert_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The expert parallelism size. | [optional] -**dist_init_addr** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The host address for initializing distributed backend. | [optional] -**nnodes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of nodes. | [optional] -**node_rank** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The node rank. | [optional] -**json_model_override_args** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A dictionary in JSON string format used to override default model configurations. 
| [optional] -**lora_paths** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The list of LoRA adapters. | [optional] -**max_loras_per_batch** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of adapters for a running batch. | [optional] -**attention_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the kernels for attention layers. | [optional] -**sampling_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the kernels for sampling layers. | [optional] -**grammar_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the backend for grammar-guided decoding. | [optional] -**speculative_algorithm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Speculative algorithm. 
| [optional] -**speculative_draft_model_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the draft model weights. | [optional] -**speculative_num_steps** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of steps sampled from draft model in Speculative Decoding. | [optional] -**speculative_num_draft_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of token sampled from draft model in Speculative Decoding. | [optional] -**speculative_eagle_topk** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of token sampled from draft model in eagle2 each step. 
| [optional] -**enable_double_sparsity** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable double sparsity attention | [optional] -**ds_channel_config_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the double sparsity channel config | [optional] -**ds_heavy_channel_num** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of heavy channels in double sparsity attention | [optional] -**ds_heavy_token_num** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of heavy tokens in double sparsity attention | [optional] -**ds_heavy_channel_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of heavy channels in double sparsity attention | [optional] -**ds_sparse_decode_threshold** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The threshold for sparse 
decoding in double sparsity attention | [optional] -**disable_radix_cache** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable RadixAttention for prefix caching. | [optional] -**disable_jump_forward** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable jump-forward for grammar-guided decoding. | [optional] -**disable_cuda_graph** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable cuda graph. | [optional] -**disable_cuda_graph_padding** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable cuda graph when padding is needed. | [optional] -**disable_outlines_disk_cache** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable disk cache of outlines. | [optional] -**disable_custom_all_reduce** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable the custom all-reduce kernel. 
| [optional] -**disable_mla** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable Multi-head Latent Attention (MLA) for DeepSeek-V2. | [optional] -**disable_overlap_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable the overlap scheduler. | [optional] -**enable_mixed_chunk** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable mixing prefill and decode in a batch when using chunked prefill. | [optional] -**enable_dp_attention** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable data parallelism for attention and tensor parallelism for FFN. | [optional] -**enable_ep_moe** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable expert parallelism for moe. 
| [optional] -**enable_torch_compile** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Optimize the model with torch.compile. | [optional] -**torch_compile_max_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the maximum batch size when using torch compile. | [optional] -**cuda_graph_max_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the maximum batch size for cuda graph. | [optional] -**cuda_graph_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the list of batch sizes for cuda graph. | [optional] -**torchao_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Optimize the model with torchao. | [optional] -**enable_nan_detection** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable the NaN detection for debugging purposes. 
| [optional] -**enable_p2p_check** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable P2P check for GPU access. | [optional] -**triton_attention_reduce_in_fp32** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Cast the intermediate attention results to fp32. | [optional] -**triton_attention_num_kv_splits** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of KV splits in flash decoding Triton kernel. | [optional] -**num_continuous_decode_steps** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Run multiple continuous decoding steps to reduce scheduling overhead. | [optional] -**delete_ckpt_after_loading** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Delete the model checkpoint after loading the model. 
| [optional] -**enable_memory_saver** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Allow saving memory using release_memory_occupation and resume_memory_occupation | [optional] -**allow_auto_truncate** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Allow automatically truncating requests that exceed the maximum input length. | [optional] -**enable_custom_logit_processor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable users to pass custom logit processors to the server. | [optional] -**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specify the parser for handling tool-call interactions. | [optional] -**huggingface_repo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Hugging Face repository ID. 
| [optional] -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["sglang", ] if omitted the server will use the default value of sglang -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateTensorRTLLMModelEndpointRequest.md b/docs/models/CreateTensorRTLLMModelEndpointRequest.md deleted file mode 100644 index be014059..00000000 --- a/docs/models/CreateTensorRTLLMModelEndpointRequest.md +++ /dev/null @@ -1,44 +0,0 @@ -# launch.api_client.model.create_tensor_rtllm_model_endpoint_request.CreateTensorRTLLMModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, 
bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false -**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 -**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the 
default value of sync -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["tensorrt_llm", ] if omitted the server will use the default value of tensorrt_llm -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateTextGenerationInferenceModelEndpointRequest.md b/docs/models/CreateTextGenerationInferenceModelEndpointRequest.md deleted file mode 100644 index ec6191c6..00000000 --- a/docs/models/CreateTextGenerationInferenceModelEndpointRequest.md +++ /dev/null @@ -1,44 +0,0 @@ -# launch.api_client.model.create_text_generation_inference_model_endpoint_request.CreateTextGenerationInferenceModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, 
None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, 
None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false -**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 -**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, 
BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["text_generation_inference", ] if omitted the server will use the default value of text_generation_inference -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateTriggerV1Request.md b/docs/models/CreateTriggerV1Request.md deleted file mode 100644 index 08830f2d..00000000 --- a/docs/models/CreateTriggerV1Request.md +++ /dev/null @@ -1,19 +0,0 @@ -# launch.api_client.model.create_trigger_v1_request.CreateTriggerV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**cron_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**default_job_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_job_metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateTriggerV1Response.md b/docs/models/CreateTriggerV1Response.md deleted file mode 100644 index b714267e..00000000 --- a/docs/models/CreateTriggerV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# 
launch.api_client.model.create_trigger_v1_response.CreateTriggerV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**trigger_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CreateVLLMModelEndpointRequest.md b/docs/models/CreateVLLMModelEndpointRequest.md deleted file mode 100644 index e4b09196..00000000 --- a/docs/models/CreateVLLMModelEndpointRequest.md +++ /dev/null @@ -1,82 +0,0 @@ -# launch.api_client.model.create_vllm_model_endpoint_request.CreateVLLMModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. 
| [optional] if omitted the server will use the default value of false -**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 -**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync -**max_gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum GPU memory utilization for the batch inference. Default to 90%. 
Deprecated in favor of specifying this in VLLMModelConfig | [optional] -**attention_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Attention backend to use for vLLM. Default to None. | [optional] -**max_model_len** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model context length, If unspecified, will be automatically derived from the model config | [optional] -**max_num_seqs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of sequences per iteration | [optional] -**enforce_eager** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal perforamnce and flexibility | [optional] -**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. 
| [optional] if omitted the server will use the default value of false -**pipeline_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of pipeline stages. Default to None. | [optional] -**tensor_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tensor parallel replicas. Default to None. | [optional] -**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights. | [optional] -**disable_log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable logging requests. Default to None. | [optional] -**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint | [optional] -**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call parser | [optional] -**enable_auto_tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable auto tool choice | [optional] -**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. * \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. * \"pt\" will load the weights in the pytorch bin format. * \"safetensors\" will load the weights in the safetensors format. * \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading. * \"dummy\" will initialize the weights with random values, which is mainly for profiling. * \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information. * \"bitsandbytes\" will load the weights using bitsandbytes quantization. 
| [optional] -**config_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'. | [optional] -**tokenizer_mode** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokenizer mode. 'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`. | [optional] -**limit_mm_per_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of data instances per modality per prompt. Only applicable for multimodal models. | [optional] -**max_num_batched_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of batched tokens per iteration | [optional] -**tokenizer** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name or path of the huggingface tokenizer to use. 
| [optional] -**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models. | [optional] -**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Random seed for reproducibility. | [optional] -**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**code_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**rope_scaling** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum. | [optional] -**tokenizer_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm. | [optional] -**max_seq_len_to_capture** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode. | [optional] -**disable_sliding_window** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to disable sliding window. 
If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored. | [optional] -**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, skip initialization of tokenizer and detokenizer. | [optional] -**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. If not specified, the model name will be the same as `model`. | [optional] -**override_neuron_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. | [optional] -**mm_processor_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. 
| [optional] -**block_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of a cache block in number of tokens. | [optional] -**gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Fraction of GPU memory to use for the vLLM execution. | [optional] -**swap_space** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of the CPU swap space per GPU (in GiB). | [optional] -**cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. | [optional] -**num_gpu_blocks_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. 
| [optional] -**enable_prefix_caching** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enables automatic prefix caching. | [optional] -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["vllm", ] if omitted the server will use the default value of vllm -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/CustomFramework.md b/docs/models/CustomFramework.md deleted file mode 100644 index d09b7660..00000000 --- a/docs/models/CustomFramework.md +++ /dev/null @@ -1,19 +0,0 @@ -# launch.api_client.model.custom_framework.CustomFramework - -This is the entity-layer class for a custom framework specification. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for a custom framework specification. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**image_repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**framework_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["custom_base_image", ] -**image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/DeleteFileResponse.md b/docs/models/DeleteFileResponse.md deleted file mode 100644 index 198545e8..00000000 --- a/docs/models/DeleteFileResponse.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.delete_file_response.DeleteFileResponse - -Response object for deleting a file. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for deleting a file. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**deleted** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether deletion was successful. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/DeleteLLMEndpointResponse.md b/docs/models/DeleteLLMEndpointResponse.md deleted file mode 100644 index 5306832d..00000000 --- a/docs/models/DeleteLLMEndpointResponse.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.delete_llm_endpoint_response.DeleteLLMEndpointResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, 
str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**deleted** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/DeleteModelEndpointV1Response.md b/docs/models/DeleteModelEndpointV1Response.md deleted file mode 100644 index 062057f3..00000000 --- a/docs/models/DeleteModelEndpointV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.delete_model_endpoint_v1_response.DeleteModelEndpointV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**deleted** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, 
bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/DeleteTriggerV1Response.md b/docs/models/DeleteTriggerV1Response.md deleted file mode 100644 index 7ec831cc..00000000 --- a/docs/models/DeleteTriggerV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.delete_trigger_v1_response.DeleteTriggerV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any 
string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/DockerImageBatchJob.md b/docs/models/DockerImageBatchJob.md deleted file mode 100644 index 57327ff1..00000000 --- a/docs/models/DockerImageBatchJob.md +++ /dev/null @@ -1,25 +0,0 @@ -# launch.api_client.model.docker_image_batch_job.DockerImageBatchJob - -This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job created via the \"supply a docker image for a k8s job\" API. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job created via the \"supply a docker image for a k8s job\" API. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**owner** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_by** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**status** | [**BatchJobStatus**](BatchJobStatus.md) | [**BatchJobStatus**](BatchJobStatus.md) | | -**completed_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] value must conform to RFC-3339 date-time -**annotations** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**override_job_max_runtime_s** | dict, frozendict.frozendict, 
str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/DockerImageBatchJobBundleV1Response.md b/docs/models/DockerImageBatchJobBundleV1Response.md deleted file mode 100644 index e50f5e43..00000000 --- a/docs/models/DockerImageBatchJobBundleV1Response.md +++ /dev/null @@ -1,28 +0,0 @@ -# launch.api_client.model.docker_image_batch_job_bundle_v1_response.DockerImageBatchJobBundleV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**image_repository** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**mount_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, 
FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/EndpointPredictV1Request.md b/docs/models/EndpointPredictV1Request.md deleted file mode 100644 index ba60dd92..00000000 --- a/docs/models/EndpointPredictV1Request.md +++ /dev/null @@ -1,21 +0,0 @@ -# launch.api_client.model.endpoint_predict_v1_request.EndpointPredictV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**args** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cloudpickle** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, 
int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**return_pickled** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false -**destination_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/File.md b/docs/models/File.md deleted file mode 100644 index b0b00491..00000000 --- a/docs/models/File.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.file.File - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, 
int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**filename** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the file, used when passing the file to the model as a string. | [optional] -**file_data** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The base64 encoded file data, used when passing the file to the model as a string. | [optional] -**file_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The ID of an uploaded file to use as input. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/FilteredChatCompletionV2Request.md b/docs/models/FilteredChatCompletionV2Request.md deleted file mode 100644 index 787824c2..00000000 --- a/docs/models/FilteredChatCompletionV2Request.md +++ /dev/null @@ -1,72 +0,0 @@ -# launch.api_client.model.filtered_chat_completion_v2_request.FilteredChatCompletionV2Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**messages** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of messages comprising the conversation so far. Depending on the [model](/docs/models) you use, different message types (modalities) are supported, like [text](/docs/guides/text-generation), [images](/docs/guides/vision), and [audio](/docs/guides/audio). 
| -**best_of** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of output sequences that are generated from the prompt. From these `best_of` sequences, the top `n` sequences are returned. `best_of` must be greater than or equal to `n`. This is treated as the beam width when `use_beam_search` is True. By default, `best_of` is set to `n`. | [optional] -**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the number of top tokens to consider. -1 means consider all tokens. | [optional] -**min_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that represents the minimum probability for a token to be considered, relative to the probability of the most likely token. Must be in [0, 1]. Set to 0 to disable this. | [optional] -**use_beam_search** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use beam search for sampling. | [optional] -**length_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes sequences based on their length. 
Used in beam search. | [optional] -**repetition_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. Values > 1 encourage the model to use new tokens, while values < 1 encourage the model to repeat tokens. | [optional] -**early_stopping** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the stopping condition for beam search. It accepts the following values: `True`, where the generation stops as soon as there are `best_of` complete candidates; `False`, where an heuristic is applied and the generation stops when is it very unlikely to find better candidates; `\"never\"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). | [optional] -**stop_token_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of tokens that stop the generation when they are generated. The returned output will contain the stop tokens unless the stop tokens are special tokens. | [optional] -**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to include the stop strings in output text. 
Defaults to False. | [optional] -**ignore_eos** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. | [optional] -**min_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Minimum number of tokens to generate per output sequence before EOS or stop_token_ids can be generated | [optional] -**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to skip special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true -**spaces_between_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to add spaces between special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true -**echo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, the new message will be prepended with the last message if they belong to the same role. 
| [optional] -**add_generation_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, the generation prompt will be added to the chat template. This is a parameter used by chat template in tokenizer config of the model. | [optional] -**continue_final_message** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. The model will continue this message rather than starting a new one. This allows you to \"prefill\" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`. | [optional] -**add_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, special tokens (e.g. BOS) will be added to the prompt on top of what is added by the chat template. For most models, the chat template takes care of adding the special tokens so this should be set to false (as is the default). | [optional] -**documents** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). 
If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing \"title\" and \"text\" keys. | [optional] -**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this conversion. As of transformers v4.44, default chat template is no longer allowed, so you must provide a chat template if the model's tokenizer does not define one and no override template is given | [optional] -**chat_template_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Additional kwargs to pass to the template renderer. Will be accessible by the chat template. | [optional] -**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | JSON schema for guided decoding. Only supported in vllm. | [optional] -**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex for guided decoding. Only supported in vllm. 
| [optional] -**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choices for guided decoding. Only supported in vllm. | [optional] -**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Context-free grammar for guided decoding. Only supported in vllm. | [optional] -**guided_decoding_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer' | [optional] -**guided_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default whitespace pattern for guided json decoding. | [optional] -**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling. 
| [optional] -**metadata** | [**Metadata**](Metadata.md) | [**Metadata**](Metadata.md) | | [optional] -**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] if omitted the server will use the default value of 1 -**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | [optional] if omitted the server will use the default value of 1 -**user** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
| [optional] -**service_tier** | [**ServiceTier**](ServiceTier.md) | [**ServiceTier**](ServiceTier.md) | | [optional] -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**modalities** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` | [optional] -**reasoning_effort** | [**ReasoningEffort**](ReasoningEffort.md) | [**ReasoningEffort**](ReasoningEffort.md) | | [optional] -**max_completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). | [optional] -**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. | [optional] if omitted the server will use the default value of 0 -**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. | [optional] if omitted the server will use the default value of 0 -**web_search_options** | [**WebSearchOptions**](WebSearchOptions.md) | [**WebSearchOptions**](WebSearchOptions.md) | This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](/docs/guides/tools-web-search?api-mode=chat). | [optional] -**top_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. | [optional] -**response_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An object specifying the format that the model must output. Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. 
Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. | [optional] -**audio** | [**Audio2**](Audio2.md) | [**Audio2**](Audio2.md) | Parameters for audio output. Required when audio output is requested with `modalities: [\"audio\"]`. [Learn more](/docs/guides/audio). | [optional] -**store** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether or not to store the output of this chat completion request for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products. | [optional] if omitted the server will use the default value of false -**stream** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false -**stop** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
| [optional] -**logit_bias** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. | [optional] -**logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. | [optional] if omitted the server will use the default value of false -**max_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o-series models](/docs/guides/reasoning). 
| [optional] -**n** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. | [optional] if omitted the server will use the default value of 1 -**prediction** | [**PredictionContent**](PredictionContent.md) | [**PredictionContent**](PredictionContent.md) | Configuration for a [Predicted Output](/docs/guides/predicted-outputs), which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content. | [optional] -**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. 
| [optional] -**stream_options** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] -**tools** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. | [optional] -**tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. | [optional] -**parallel_tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. 
| [optional] -**function_call** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. | [optional] -**functions** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/FilteredCompletionV2Request.md b/docs/models/FilteredCompletionV2Request.md deleted file mode 100644 index c41361d8..00000000 --- a/docs/models/FilteredCompletionV2Request.md +++ /dev/null @@ -1,52 +0,0 @@ -# launch.api_client.model.filtered_completion_v2_request.FilteredCompletionV2Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. 
| -**best_of** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. | [optional] if omitted the server will use the default value of 1 -**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the number of top tokens to consider. -1 means consider all tokens. | [optional] -**min_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that represents the minimum probability for a token to be considered, relative to the probability of the most likely token. Must be in [0, 1]. Set to 0 to disable this. | [optional] -**use_beam_search** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use beam search for sampling. 
| [optional] -**length_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes sequences based on their length. Used in beam search. | [optional] -**repetition_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. Values > 1 encourage the model to use new tokens, while values < 1 encourage the model to repeat tokens. | [optional] -**early_stopping** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the stopping condition for beam search. It accepts the following values: `True`, where the generation stops as soon as there are `best_of` complete candidates; `False`, where an heuristic is applied and the generation stops when is it very unlikely to find better candidates; `\"never\"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). | [optional] -**stop_token_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of tokens that stop the generation when they are generated. The returned output will contain the stop tokens unless the stop tokens are special tokens. 
| [optional] -**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to include the stop strings in output text. | [optional] -**ignore_eos** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. | [optional] -**min_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Minimum number of tokens to generate per output sequence before EOS or stop_token_ids can be generated | [optional] -**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to skip special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true -**spaces_between_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to add spaces between special tokens in the output. Only supported in vllm. 
| [optional] if omitted the server will use the default value of true -**add_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true (the default), special tokens (e.g. BOS) will be added to the prompt. | [optional] -**response_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Similar to chat completion, this parameter specifies the format of output. Only {'type': 'json_object'} or {'type': 'text' } is supported. | [optional] -**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | JSON schema for guided decoding. Only supported in vllm. | [optional] -**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex for guided decoding. Only supported in vllm. | [optional] -**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choices for guided decoding. Only supported in vllm. 
| [optional] -**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Context-free grammar for guided decoding. Only supported in vllm. | [optional] -**guided_decoding_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer' | [optional] -**guided_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default whitespace pattern for guided json decoding. 
| [optional] -**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**echo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Echo back the prompt in addition to the completion | [optional] if omitted the server will use the default value of false -**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/text-generation) | [optional] if omitted the server will use the default value of 0 -**logit_bias** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated. | [optional] -**logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. | [optional] -**max_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. | [optional] if omitted the server will use the default value of 16 -**n** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. 
Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. | [optional] if omitted the server will use the default value of 1 -**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/text-generation) | [optional] if omitted the server will use the default value of 0 -**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. | [optional] -**stop** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
| [optional] -**stream** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false -**stream_options** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] -**suffix** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The suffix that comes after a completion of inserted text. This parameter is only supported for `gpt-3.5-turbo-instruct`. | [optional] -**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] if omitted the server will use the default value of 1 -**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | [optional] if omitted the server will use the default value of 1 -**user** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Function1.md b/docs/models/Function1.md deleted file mode 100644 index b8abe0fb..00000000 --- a/docs/models/Function1.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.function1.Function1 - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | -**arguments** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Function2.md b/docs/models/Function2.md deleted file mode 100644 index 7cc1484e..00000000 --- a/docs/models/Function2.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.function2.Function2 - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**name** | dict, frozendict.frozendict, 
str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | [optional] -**arguments** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Function3.md b/docs/models/Function3.md deleted file mode 100644 index f91e8561..00000000 --- a/docs/models/Function3.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.function3.Function3 - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | 
Notes ------------- | ------------- | ------------- | ------------- | ------------- -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/FunctionCall.md b/docs/models/FunctionCall.md deleted file mode 100644 index c440d9a3..00000000 --- a/docs/models/FunctionCall.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.function_call.FunctionCall - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. 
| -**arguments** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/FunctionCall2.md b/docs/models/FunctionCall2.md deleted file mode 100644 index 5f3a8af3..00000000 --- a/docs/models/FunctionCall2.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.function_call2.FunctionCall2 - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**arguments** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | [optional] -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/FunctionObject.md b/docs/models/FunctionObject.md deleted file mode 100644 index f5316302..00000000 --- a/docs/models/FunctionObject.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.function_object.FunctionObject - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- 
-**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. | -**description** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A description of what the function does, used by the model to choose when and how to call the function. | [optional] -**parameters** | [**FunctionParameters**](FunctionParameters.md) | [**FunctionParameters**](FunctionParameters.md) | | [optional] -**strict** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). 
| [optional] if omitted the server will use the default value of false -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/FunctionParameters.md b/docs/models/FunctionParameters.md deleted file mode 100644 index edc0955e..00000000 --- a/docs/models/FunctionParameters.md +++ /dev/null @@ -1,14 +0,0 @@ -# launch.api_client.model.function_parameters.FunctionParameters - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetAsyncTaskV1Response.md 
b/docs/models/GetAsyncTaskV1Response.md deleted file mode 100644 index 062b0cca..00000000 --- a/docs/models/GetAsyncTaskV1Response.md +++ /dev/null @@ -1,19 +0,0 @@ -# launch.api_client.model.get_async_task_v1_response.GetAsyncTaskV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**status** | [**TaskStatus**](TaskStatus.md) | [**TaskStatus**](TaskStatus.md) | | -**result** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**traceback** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**status_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, 
str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetBatchCompletionV2Response.md b/docs/models/GetBatchCompletionV2Response.md deleted file mode 100644 index 59366755..00000000 --- a/docs/models/GetBatchCompletionV2Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.get_batch_completion_v2_response.GetBatchCompletionV2Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**job** | [**BatchCompletionsJob**](BatchCompletionsJob.md) | [**BatchCompletionsJob**](BatchCompletionsJob.md) | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetBatchJobV1Response.md 
b/docs/models/GetBatchJobV1Response.md deleted file mode 100644 index d46aaa29..00000000 --- a/docs/models/GetBatchJobV1Response.md +++ /dev/null @@ -1,19 +0,0 @@ -# launch.api_client.model.get_batch_job_v1_response.GetBatchJobV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**duration** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**status** | [**BatchJobStatus**](BatchJobStatus.md) | [**BatchJobStatus**](BatchJobStatus.md) | | -**result** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_tasks_pending** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_tasks_completed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | 
dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetDockerImageBatchJobV1Response.md b/docs/models/GetDockerImageBatchJobV1Response.md deleted file mode 100644 index 69c63d3c..00000000 --- a/docs/models/GetDockerImageBatchJobV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.get_docker_image_batch_job_v1_response.GetDockerImageBatchJobV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**status** | [**BatchJobStatus**](BatchJobStatus.md) | [**BatchJobStatus**](BatchJobStatus.md) | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git 
a/docs/models/GetFileContentResponse.md b/docs/models/GetFileContentResponse.md deleted file mode 100644 index aca9dab1..00000000 --- a/docs/models/GetFileContentResponse.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.get_file_content_response.GetFileContentResponse - -Response object for retrieving a file's content. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for retrieving a file's content. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the requested file. | -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | File content. 
| -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetFileResponse.md b/docs/models/GetFileResponse.md deleted file mode 100644 index e7d7a480..00000000 --- a/docs/models/GetFileResponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# launch.api_client.model.get_file_response.GetFileResponse - -Response object for retrieving a file. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for retrieving a file. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**filename** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | File name. | -**size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Length of the file, in characters. 
| -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the requested file. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetFineTuneEventsResponse.md b/docs/models/GetFineTuneEventsResponse.md deleted file mode 100644 index e361c2b6..00000000 --- a/docs/models/GetFineTuneEventsResponse.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.get_fine_tune_events_response.GetFineTuneEventsResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**events** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, 
decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetFineTuneResponse.md b/docs/models/GetFineTuneResponse.md deleted file mode 100644 index e5bcd2f3..00000000 --- a/docs/models/GetFineTuneResponse.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.get_fine_tune_response.GetFineTuneResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Unique ID of the fine tune | -**status** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Status of the requested fine tune. 
| -**fine_tuned_model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name of the resulting fine-tuned model. This can be plugged into the Completion API ones the fine-tune is complete | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetLLMModelEndpointV1Response.md b/docs/models/GetLLMModelEndpointV1Response.md deleted file mode 100644 index c3151dc7..00000000 --- a/docs/models/GetLLMModelEndpointV1Response.md +++ /dev/null @@ -1,26 +0,0 @@ -# launch.api_client.model.get_llm_model_endpoint_v1_response.GetLLMModelEndpointV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**inference_framework** | [**LLMInferenceFramework**](LLMInferenceFramework.md) | [**LLMInferenceFramework**](LLMInferenceFramework.md) | | -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, 
int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | -**status** | [**ModelEndpointStatus**](ModelEndpointStatus.md) | [**ModelEndpointStatus**](ModelEndpointStatus.md) | | -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, 
bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**spec** | [**GetModelEndpointV1Response**](GetModelEndpointV1Response.md) | [**GetModelEndpointV1Response**](GetModelEndpointV1Response.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetModelEndpointV1Response.md b/docs/models/GetModelEndpointV1Response.md deleted file mode 100644 index 11477156..00000000 --- a/docs/models/GetModelEndpointV1Response.md +++ /dev/null @@ -1,35 +0,0 @@ -# launch.api_client.model.get_model_endpoint_v1_response.GetModelEndpointV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**endpoint_type** | [**ModelEndpointType**](ModelEndpointType.md) | [**ModelEndpointType**](ModelEndpointType.md) | | -**last_updated_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, 
bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time -**destination** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time -**bundle_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_by** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**status** | [**ModelEndpointStatus**](ModelEndpointStatus.md) | [**ModelEndpointStatus**](ModelEndpointStatus.md) | | -**deployment_name** | dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**aws_role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**results_s3_bucket** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**deployment_state** | [**ModelEndpointDeploymentState**](ModelEndpointDeploymentState.md) | [**ModelEndpointDeploymentState**](ModelEndpointDeploymentState.md) | | [optional] -**resource_state** | [**ModelEndpointResourceState**](ModelEndpointResourceState.md) | [**ModelEndpointResourceState**](ModelEndpointResourceState.md) | | [optional] -**num_queued_items** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GetTriggerV1Response.md b/docs/models/GetTriggerV1Response.md deleted file mode 100644 index 67616be5..00000000 --- a/docs/models/GetTriggerV1Response.md +++ /dev/null @@ -1,23 +0,0 @@ -# launch.api_client.model.get_trigger_v1_response.GetTriggerV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | 
------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**owner** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**cron_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**docker_image_batch_job_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, 
str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_by** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**default_job_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_job_metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/GpuType.md b/docs/models/GpuType.md deleted file mode 100644 index 2710e41e..00000000 --- a/docs/models/GpuType.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.gpu_type.GpuType - -Lists allowed GPU types for Launch. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Lists allowed GPU types for Launch. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/HTTPValidationError.md b/docs/models/HTTPValidationError.md deleted file mode 100644 index 9b4bbdc9..00000000 --- a/docs/models/HTTPValidationError.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.http_validation_error.HTTPValidationError - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**detail** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type 
| [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ImageUrl.md b/docs/models/ImageUrl.md deleted file mode 100644 index f3a8ae25..00000000 --- a/docs/models/ImageUrl.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.image_url.ImageUrl - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Either a URL of the image or the base64 encoded image data. | -**detail** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding). 
| [optional] must be one of ["auto", "low", "high", ] if omitted the server will use the default value of auto -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/InputAudio.md b/docs/models/InputAudio.md deleted file mode 100644 index 428ef818..00000000 --- a/docs/models/InputAudio.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.input_audio.InputAudio - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**data** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Base64 encoded audio data. | -**format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the encoded audio data. 
Currently supports \"wav\" and \"mp3\". | must be one of ["wav", "mp3", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/JsonSchema.md b/docs/models/JsonSchema.md deleted file mode 100644 index c1d1acd6..00000000 --- a/docs/models/JsonSchema.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.json_schema.JsonSchema - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
| -**description** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A description of what the response format is for, used by the model to determine how to respond in the format. | [optional] -**schema** | [**ResponseFormatJsonSchemaSchema**](ResponseFormatJsonSchemaSchema.md) | [**ResponseFormatJsonSchemaSchema**](ResponseFormatJsonSchemaSchema.md) | | [optional] -**strict** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). 
| [optional] if omitted the server will use the default value of false -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/LLMFineTuneEvent.md b/docs/models/LLMFineTuneEvent.md deleted file mode 100644 index a3dbcda8..00000000 --- a/docs/models/LLMFineTuneEvent.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.llm_fine_tune_event.LLMFineTuneEvent - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**level** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**message** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**timestamp** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/LLMInferenceFramework.md b/docs/models/LLMInferenceFramework.md deleted file mode 100644 index 897cabdd..00000000 --- a/docs/models/LLMInferenceFramework.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.llm_inference_framework.LLMInferenceFramework - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/LLMSource.md b/docs/models/LLMSource.md deleted file mode 100644 index 7cbd03f2..00000000 --- a/docs/models/LLMSource.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.llm_source.LLMSource - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, 
list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ListDockerImageBatchJobBundleV1Response.md b/docs/models/ListDockerImageBatchJobBundleV1Response.md deleted file mode 100644 index d1d5204f..00000000 --- a/docs/models/ListDockerImageBatchJobBundleV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.list_docker_image_batch_job_bundle_v1_response.ListDockerImageBatchJobBundleV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**docker_image_batch_job_bundles** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) 
[[Back to README]](../../README.md) - diff --git a/docs/models/ListDockerImageBatchJobsV1Response.md b/docs/models/ListDockerImageBatchJobsV1Response.md deleted file mode 100644 index 4cb6a053..00000000 --- a/docs/models/ListDockerImageBatchJobsV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.list_docker_image_batch_jobs_v1_response.ListDockerImageBatchJobsV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**jobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ListFilesResponse.md b/docs/models/ListFilesResponse.md deleted file mode 100644 index 50fb04f2..00000000 --- a/docs/models/ListFilesResponse.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.list_files_response.ListFilesResponse - -Response object for listing 
files. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for listing files. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**files** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of file IDs, names, and sizes. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ListFineTunesResponse.md b/docs/models/ListFineTunesResponse.md deleted file mode 100644 index 2222ad09..00000000 --- a/docs/models/ListFineTunesResponse.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.list_fine_tunes_response.ListFineTunesResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**jobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ListLLMModelEndpointsV1Response.md b/docs/models/ListLLMModelEndpointsV1Response.md deleted file mode 100644 index 9ebdfbb2..00000000 --- a/docs/models/ListLLMModelEndpointsV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.list_llm_model_endpoints_v1_response.ListLLMModelEndpointsV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_endpoints** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ListModelBundlesV1Response.md b/docs/models/ListModelBundlesV1Response.md deleted file mode 100644 index 77f59123..00000000 --- a/docs/models/ListModelBundlesV1Response.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.list_model_bundles_v1_response.ListModelBundlesV1Response - -Response object for listing Model Bundles. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for listing Model Bundles. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_bundles** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ListModelBundlesV2Response.md b/docs/models/ListModelBundlesV2Response.md deleted file mode 100644 index 19769683..00000000 --- a/docs/models/ListModelBundlesV2Response.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.list_model_bundles_v2_response.ListModelBundlesV2Response - -Response object for listing Model Bundles. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for listing Model Bundles. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_bundles** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ListModelEndpointsV1Response.md b/docs/models/ListModelEndpointsV1Response.md deleted file mode 100644 index e0c8f0a3..00000000 --- a/docs/models/ListModelEndpointsV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.list_model_endpoints_v1_response.ListModelEndpointsV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_endpoints** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, 
str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ListTriggersV1Response.md b/docs/models/ListTriggersV1Response.md deleted file mode 100644 index 8da56be0..00000000 --- a/docs/models/ListTriggersV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.list_triggers_v1_response.ListTriggersV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**triggers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | 
[optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Logprobs.md b/docs/models/Logprobs.md deleted file mode 100644 index afd996ab..00000000 --- a/docs/models/Logprobs.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.logprobs.Logprobs - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of message refusal tokens with log probability information. | -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of message content tokens with log probability information. 
| -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Logprobs2.md b/docs/models/Logprobs2.md deleted file mode 100644 index de11800c..00000000 --- a/docs/models/Logprobs2.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.logprobs2.Logprobs2 - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**text_offset** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**token_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**top_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Metadata.md b/docs/models/Metadata.md deleted file mode 100644 index ed7ad8fd..00000000 --- a/docs/models/Metadata.md +++ /dev/null @@ -1,14 +0,0 @@ -# launch.api_client.model.metadata.Metadata - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - 
-[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelBundleEnvironmentParams.md b/docs/models/ModelBundleEnvironmentParams.md deleted file mode 100644 index 27a3168f..00000000 --- a/docs/models/ModelBundleEnvironmentParams.md +++ /dev/null @@ -1,21 +0,0 @@ -# launch.api_client.model.model_bundle_environment_params.ModelBundleEnvironmentParams - -This is the entity-layer class for the Model Bundle environment parameters. Being an entity-layer class, it should be a plain data object. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the Model Bundle environment parameters. Being an entity-layer class, it should be a plain data object. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**framework_type** | [**ModelBundleFrameworkType**](ModelBundleFrameworkType.md) | [**ModelBundleFrameworkType**](ModelBundleFrameworkType.md) | | -**pytorch_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**tensorflow_version** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**ecr_repo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelBundleFrameworkType.md 
b/docs/models/ModelBundleFrameworkType.md deleted file mode 100644 index 432d5097..00000000 --- a/docs/models/ModelBundleFrameworkType.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.model_bundle_framework_type.ModelBundleFrameworkType - -The canonical list of possible machine learning frameworks of Model Bundles. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The canonical list of possible machine learning frameworks of Model Bundles. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelBundleOrderBy.md b/docs/models/ModelBundleOrderBy.md deleted file mode 100644 index 846cb988..00000000 --- a/docs/models/ModelBundleOrderBy.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.model_bundle_order_by.ModelBundleOrderBy - -The canonical list of possible orderings of Model Bundles. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The canonical list of possible orderings of Model Bundles. 
| - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelBundlePackagingType.md b/docs/models/ModelBundlePackagingType.md deleted file mode 100644 index c3fbbfd1..00000000 --- a/docs/models/ModelBundlePackagingType.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.model_bundle_packaging_type.ModelBundlePackagingType - -The canonical list of possible packaging types for Model Bundles. These values broadly determine how the model endpoint will obtain its code & dependencies. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The canonical list of possible packaging types for Model Bundles. These values broadly determine how the model endpoint will obtain its code & dependencies. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelBundleV1Response.md b/docs/models/ModelBundleV1Response.md deleted file mode 100644 index a10e7905..00000000 --- a/docs/models/ModelBundleV1Response.md +++ /dev/null @@ -1,27 +0,0 @@ -# launch.api_client.model.model_bundle_v1_response.ModelBundleV1Response - -Response object for a single Model Bundle. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for a single Model Bundle. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**requirements** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_artifact_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**packaging_type** | [**ModelBundlePackagingType**](ModelBundlePackagingType.md) | [**ModelBundlePackagingType**](ModelBundlePackagingType.md) | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time -**location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**env_params** | [**ModelBundleEnvironmentParams**](ModelBundleEnvironmentParams.md) | [**ModelBundleEnvironmentParams**](ModelBundleEnvironmentParams.md) | | -**app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**schema_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelBundleV2Response.md b/docs/models/ModelBundleV2Response.md deleted file mode 100644 index 25fb8517..00000000 --- 
a/docs/models/ModelBundleV2Response.md +++ /dev/null @@ -1,23 +0,0 @@ -# launch.api_client.model.model_bundle_v2_response.ModelBundleV2Response - -Response object for a single Model Bundle. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for a single Model Bundle. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_artifact_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**schema_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelDownloadRequest.md b/docs/models/ModelDownloadRequest.md deleted file mode 100644 index 1fe07b7c..00000000 --- a/docs/models/ModelDownloadRequest.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.model_download_request.ModelDownloadRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- 
-**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name of the fine tuned model | -**download_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Format that you want the downloaded urls to be compatible with. Currently only supports hugging_face | [optional] if omitted the server will use the default value of hugging_face -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelDownloadResponse.md b/docs/models/ModelDownloadResponse.md deleted file mode 100644 index 88e55683..00000000 --- a/docs/models/ModelDownloadResponse.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.model_download_response.ModelDownloadResponse - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes 
------------- | ------------- | ------------- | ------------- | ------------- -**urls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary of (file_name, url) pairs to download the model from. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelEndpointDeploymentState.md b/docs/models/ModelEndpointDeploymentState.md deleted file mode 100644 index a69334d1..00000000 --- a/docs/models/ModelEndpointDeploymentState.md +++ /dev/null @@ -1,22 +0,0 @@ -# launch.api_client.model.model_endpoint_deployment_state.ModelEndpointDeploymentState - -This is the entity-layer class for the deployment settings related to a Model Endpoint. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the deployment settings related to a Model Endpoint. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**concurrent_requests_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**available_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**unavailable_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, 
BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelEndpointOrderBy.md b/docs/models/ModelEndpointOrderBy.md deleted file mode 100644 index 23a43e36..00000000 --- a/docs/models/ModelEndpointOrderBy.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.model_endpoint_order_by.ModelEndpointOrderBy - -The canonical list of possible orderings of Model Bundles. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The canonical list of possible orderings of Model Bundles. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelEndpointResourceState.md b/docs/models/ModelEndpointResourceState.md deleted file mode 100644 index 127d153a..00000000 --- a/docs/models/ModelEndpointResourceState.md +++ /dev/null @@ -1,23 +0,0 @@ -# launch.api_client.model.model_endpoint_resource_state.ModelEndpointResourceState - -This is the entity-layer class for the resource settings per worker of a Model Endpoint. Note: in the multinode case, there are multiple \"nodes\" per \"worker\". \"Nodes\" is analogous to a single k8s pod that may take up all the GPUs on a single machine. 
\"Workers\" is the smallest unit that a request can be made to, and consists of one leader \"node\" and multiple follower \"nodes\" (named \"worker\" in the k8s LeaderWorkerSet definition). cpus/gpus/memory/storage are per-node, thus the total consumption by a \"worker\" is cpus/gpus/etc. multiplied by nodes_per_worker. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the resource settings per worker of a Model Endpoint. Note: in the multinode case, there are multiple \"nodes\" per \"worker\". \"Nodes\" is analogous to a single k8s pod that may take up all the GPUs on a single machine. \"Workers\" is the smallest unit that a request can be made to, and consists of one leader \"node\" and multiple follower \"nodes\" (named \"worker\" in the k8s LeaderWorkerSet definition). cpus/gpus/memory/storage are per-node, thus the total consumption by a \"worker\" is cpus/gpus/etc. multiplied by nodes_per_worker. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelEndpointStatus.md b/docs/models/ModelEndpointStatus.md deleted file mode 100644 index 5fbb86e3..00000000 --- a/docs/models/ModelEndpointStatus.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.model_endpoint_status.ModelEndpointStatus - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ModelEndpointType.md b/docs/models/ModelEndpointType.md deleted file mode 100644 index 7b4ef324..00000000 --- a/docs/models/ModelEndpointType.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.model_endpoint_type.ModelEndpointType - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to 
README]](../../README.md) - diff --git a/docs/models/ParallelToolCalls.md b/docs/models/ParallelToolCalls.md deleted file mode 100644 index 6e1a6e56..00000000 --- a/docs/models/ParallelToolCalls.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.parallel_tool_calls.ParallelToolCalls - -Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/PredictionContent.md b/docs/models/PredictionContent.md deleted file mode 100644 index ee67de3c..00000000 --- a/docs/models/PredictionContent.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.prediction_content.PredictionContent - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the predicted content you want to provide. This type is currently always `content`. | must be one of ["content", ] -**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The content that should be matched when generating a model response. If generated tokens would match this content, the entire model response can be returned much more quickly. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Prompt.md b/docs/models/Prompt.md deleted file mode 100644 index 8386200e..00000000 --- a/docs/models/Prompt.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.prompt.Prompt - -The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. | if omitted the server will use the default value of <|endoftext|> - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Prompt1.md b/docs/models/Prompt1.md deleted file mode 100644 index 60a7c0c4..00000000 --- a/docs/models/Prompt1.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.prompt1.Prompt1 - -The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 
Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. | if omitted the server will use the default value of <|endoftext|> - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Prompt1Item.md b/docs/models/Prompt1Item.md deleted file mode 100644 index e52fe744..00000000 --- a/docs/models/Prompt1Item.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.prompt1_item.Prompt1Item - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/PromptTokensDetails.md b/docs/models/PromptTokensDetails.md deleted file mode 100644 index 5c8aa2e9..00000000 --- a/docs/models/PromptTokensDetails.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.prompt_tokens_details.PromptTokensDetails - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | 
------------- | ------------- | ------------- -**audio_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Audio input tokens present in the prompt. | [optional] if omitted the server will use the default value of 0 -**cached_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Cached tokens present in the prompt. | [optional] if omitted the server will use the default value of 0 -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/PytorchFramework.md b/docs/models/PytorchFramework.md deleted file mode 100644 index a41c6bf7..00000000 --- a/docs/models/PytorchFramework.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.pytorch_framework.PytorchFramework - -This is the entity-layer class for a Pytorch framework specification. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for a Pytorch framework specification. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**pytorch_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**framework_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["pytorch", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/Quantization.md b/docs/models/Quantization.md deleted file mode 100644 index c8518147..00000000 --- a/docs/models/Quantization.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.quantization.Quantization - -## Model Type Info -Input Type | Accessed Type | Description | Notes 
------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ReasoningEffort.md b/docs/models/ReasoningEffort.md deleted file mode 100644 index 80470a58..00000000 --- a/docs/models/ReasoningEffort.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.reasoning_effort.ReasoningEffort - -**o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. 
| if omitted the server will use the default value of medium - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/RequestSchema.md b/docs/models/RequestSchema.md deleted file mode 100644 index 94ff7eb4..00000000 --- a/docs/models/RequestSchema.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.request_schema.RequestSchema - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ResponseFormatJsonObject.md b/docs/models/ResponseFormatJsonObject.md deleted file mode 100644 index 0bde48f3..00000000 --- a/docs/models/ResponseFormatJsonObject.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.response_format_json_object.ResponseFormatJsonObject - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of response format being defined. Always `json_object`. | must be one of ["json_object", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ResponseFormatJsonSchema.md b/docs/models/ResponseFormatJsonSchema.md deleted file mode 100644 index f8b99f8c..00000000 --- a/docs/models/ResponseFormatJsonSchema.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.response_format_json_schema.ResponseFormatJsonSchema - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**json_schema** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Structured Outputs configuration options, including a JSON Schema. 
| -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of response format being defined. Always `json_schema`. | must be one of ["json_schema", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ResponseFormatJsonSchemaSchema.md b/docs/models/ResponseFormatJsonSchemaSchema.md deleted file mode 100644 index 2d4fd497..00000000 --- a/docs/models/ResponseFormatJsonSchemaSchema.md +++ /dev/null @@ -1,14 +0,0 @@ -# launch.api_client.model.response_format_json_schema_schema.ResponseFormatJsonSchemaSchema - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, 
FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ResponseFormatText.md b/docs/models/ResponseFormatText.md deleted file mode 100644 index 5ab6cd35..00000000 --- a/docs/models/ResponseFormatText.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.response_format_text.ResponseFormatText - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of response format being defined. Always `text`. 
| must be one of ["text", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ResponseModalities.md b/docs/models/ResponseModalities.md deleted file mode 100644 index cd0d308d..00000000 --- a/docs/models/ResponseModalities.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.response_modalities.ResponseModalities - -Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). 
To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ResponseSchema.md b/docs/models/ResponseSchema.md deleted file mode 100644 index 59b49aea..00000000 --- a/docs/models/ResponseSchema.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.response_schema.ResponseSchema - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/RestartModelEndpointV1Response.md b/docs/models/RestartModelEndpointV1Response.md deleted file mode 100644 index af9200e2..00000000 --- a/docs/models/RestartModelEndpointV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.restart_model_endpoint_v1_response.RestartModelEndpointV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**restarted** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/RunnableImageFlavor.md b/docs/models/RunnableImageFlavor.md deleted file mode 100644 index 1ef098b7..00000000 --- a/docs/models/RunnableImageFlavor.md +++ /dev/null @@ -1,30 +0,0 @@ -# launch.api_client.model.runnable_image_flavor.RunnableImageFlavor - -This is the entity-layer class for the Model Bundle flavor of a runnable image. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the Model Bundle flavor of a runnable image. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["runnable_image", ] -**protocol** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["http", ] -**tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**predict_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /predict -**healthcheck_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /readyz -**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**readiness_initial_delay_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 120 -**extra_routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**forwarder_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of default -**worker_command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] 
-**worker_env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ServiceTier.md b/docs/models/ServiceTier.md deleted file mode 100644 index 2c1ac1ba..00000000 --- a/docs/models/ServiceTier.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.service_tier.ServiceTier - -Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing). - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing). - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. | if omitted the server will use the default value of auto - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/StopConfiguration.md b/docs/models/StopConfiguration.md deleted file mode 100644 index 5efb0007..00000000 --- a/docs/models/StopConfiguration.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.stop_configuration.StopConfiguration - -Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/StopConfiguration1.md b/docs/models/StopConfiguration1.md deleted file mode 100644 index 98464186..00000000 --- a/docs/models/StopConfiguration1.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.stop_configuration1.StopConfiguration1 - -Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
| - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/StreamError.md b/docs/models/StreamError.md deleted file mode 100644 index 4ba929e1..00000000 --- a/docs/models/StreamError.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.stream_error.StreamError - -Error object for a stream prompt completion task. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Error object for a stream prompt completion task. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**status_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**content** | [**StreamErrorContent**](StreamErrorContent.md) | [**StreamErrorContent**](StreamErrorContent.md) | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/StreamErrorContent.md 
b/docs/models/StreamErrorContent.md deleted file mode 100644 index 4f7f0323..00000000 --- a/docs/models/StreamErrorContent.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.stream_error_content.StreamErrorContent - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**error** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**timestamp** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/StreamingEnhancedRunnableImageFlavor.md b/docs/models/StreamingEnhancedRunnableImageFlavor.md deleted file mode 100644 index b9a67753..00000000 --- 
a/docs/models/StreamingEnhancedRunnableImageFlavor.md +++ /dev/null @@ -1,32 +0,0 @@ -# launch.api_client.model.streaming_enhanced_runnable_image_flavor.StreamingEnhancedRunnableImageFlavor - -For deployments that expose a streaming route in a container. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | For deployments that expose a streaming route in a container. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["streaming_enhanced_runnable_image", ] -**protocol** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["http", ] -**tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | 
-**streaming_command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of [] -**predict_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /predict -**healthcheck_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /readyz -**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**readiness_initial_delay_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 120 -**extra_routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**forwarder_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of default -**worker_command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**worker_env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**streaming_predict_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /stream -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must 
be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/SyncEndpointPredictV1Request.md b/docs/models/SyncEndpointPredictV1Request.md deleted file mode 100644 index 96e31f9b..00000000 --- a/docs/models/SyncEndpointPredictV1Request.md +++ /dev/null @@ -1,23 +0,0 @@ -# launch.api_client.model.sync_endpoint_predict_v1_request.SyncEndpointPredictV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**args** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cloudpickle** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, 
None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**return_pickled** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false -**destination_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**timeout_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_retries** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model 
list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/SyncEndpointPredictV1Response.md b/docs/models/SyncEndpointPredictV1Response.md deleted file mode 100644 index 3c0779e1..00000000 --- a/docs/models/SyncEndpointPredictV1Response.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.sync_endpoint_predict_v1_response.SyncEndpointPredictV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**status** | [**TaskStatus**](TaskStatus.md) | [**TaskStatus**](TaskStatus.md) | | -**result** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**traceback** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**status_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, 
datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/TaskStatus.md b/docs/models/TaskStatus.md deleted file mode 100644 index 69a16961..00000000 --- a/docs/models/TaskStatus.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.task_status.TaskStatus - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/TensorflowFramework.md b/docs/models/TensorflowFramework.md deleted file mode 100644 index 2c40ef1a..00000000 --- a/docs/models/TensorflowFramework.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.tensorflow_framework.TensorflowFramework - -This is the entity-layer class for a Tensorflow framework specification. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for a Tensorflow framework specification. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**tensorflow_version** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**framework_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["tensorflow", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/TokenOutput.md b/docs/models/TokenOutput.md deleted file mode 100644 index 14acb368..00000000 --- a/docs/models/TokenOutput.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.token_output.TokenOutput - -Detailed token information. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Detailed token information. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**log_prob** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**token** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ToolConfig.md b/docs/models/ToolConfig.md deleted file mode 100644 index d9c98248..00000000 --- a/docs/models/ToolConfig.md +++ /dev/null @@ -1,20 +0,0 @@ -# launch.api_client.model.tool_config.ToolConfig - -Configuration for tool use. NOTE: this config is highly experimental and signature will change significantly in future iterations. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Configuration for tool use. NOTE: this config is highly experimental and signature will change significantly in future iterations. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**max_iterations** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 10 -**execution_timeout_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 60 -**should_retry_on_error** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**any_string_name** | dict, 
frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/TopLogprob.md b/docs/models/TopLogprob.md deleted file mode 100644 index 250b4c3f..00000000 --- a/docs/models/TopLogprob.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.top_logprob.TopLogprob - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**logprob** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. 
| -**bytes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. | -**token** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The token. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/TritonEnhancedRunnableImageFlavor.md b/docs/models/TritonEnhancedRunnableImageFlavor.md deleted file mode 100644 index 79bd1687..00000000 --- a/docs/models/TritonEnhancedRunnableImageFlavor.md +++ /dev/null @@ -1,37 +0,0 @@ -# launch.api_client.model.triton_enhanced_runnable_image_flavor.TritonEnhancedRunnableImageFlavor - -For deployments that require tritonserver running in a container. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | For deployments that require tritonserver running in a container. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["triton_enhanced_runnable_image", ] -**protocol** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["http", ] -**tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**triton_commit_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | 
-**triton_model_repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**triton_num_cpu** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**predict_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /predict -**healthcheck_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /readyz -**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**readiness_initial_delay_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, 
BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 120 -**extra_routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**forwarder_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of default -**worker_command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**worker_env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**triton_model_replicas** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**triton_storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**triton_memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**triton_readiness_initial_delay_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 300 -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateBatchCompletionsV2Request.md b/docs/models/UpdateBatchCompletionsV2Request.md deleted file mode 100644 index 4a93861c..00000000 --- a/docs/models/UpdateBatchCompletionsV2Request.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.update_batch_completions_v2_request.UpdateBatchCompletionsV2Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### 
Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the batch completions job | -**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateBatchCompletionsV2Response.md b/docs/models/UpdateBatchCompletionsV2Response.md deleted file mode 100644 index 46b333b3..00000000 --- a/docs/models/UpdateBatchCompletionsV2Response.md +++ /dev/null @@ -1,25 +0,0 @@ -# launch.api_client.model.update_batch_completions_v2_response.UpdateBatchCompletionsV2Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO 
| | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**completed_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**expires_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**model_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model configuration for the batch inference. Hardware configurations are inferred. 
| -**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether the update was successful | -**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**output_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the output file. The output file will be a JSON file of type List[CompletionOutput]. | -**status** | [**BatchCompletionsJobStatus**](BatchCompletionsJobStatus.md) | [**BatchCompletionsJobStatus**](BatchCompletionsJobStatus.md) | | -**input_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. 
| [optional] -**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateBatchJobV1Request.md b/docs/models/UpdateBatchJobV1Request.md deleted file mode 100644 index 598ec313..00000000 --- a/docs/models/UpdateBatchJobV1Request.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.update_batch_job_v1_request.UpdateBatchJobV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**cancel** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, 
str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateBatchJobV1Response.md b/docs/models/UpdateBatchJobV1Response.md deleted file mode 100644 index 98c8a03c..00000000 --- a/docs/models/UpdateBatchJobV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.update_batch_job_v1_response.UpdateBatchJobV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API 
list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateDeepSpeedModelEndpointRequest.md b/docs/models/UpdateDeepSpeedModelEndpointRequest.md deleted file mode 100644 index db8ca526..00000000 --- a/docs/models/UpdateDeepSpeedModelEndpointRequest.md +++ /dev/null @@ -1,43 +0,0 @@ -# launch.api_client.model.update_deep_speed_model_endpoint_request.UpdateDeepSpeedModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, 
date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. 
| [optional] if omitted the server will use the default value of false -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | [optional] -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["deepspeed", ] if omitted the server will use the default value of deepspeed -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**force_bundle_recreation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 
false -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateDockerImageBatchJobV1Request.md b/docs/models/UpdateDockerImageBatchJobV1Request.md deleted file mode 100644 index 275be51b..00000000 --- a/docs/models/UpdateDockerImageBatchJobV1Request.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.update_docker_image_batch_job_v1_request.UpdateDockerImageBatchJobV1Request - -## Model Type Info -Input 
Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**cancel** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateDockerImageBatchJobV1Response.md b/docs/models/UpdateDockerImageBatchJobV1Response.md deleted file mode 100644 index 83e0b3ce..00000000 --- a/docs/models/UpdateDockerImageBatchJobV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.update_docker_image_batch_job_v1_response.UpdateDockerImageBatchJobV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, 
tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateLLMModelEndpointV1Request.md b/docs/models/UpdateLLMModelEndpointV1Request.md deleted file mode 100644 index 8a540084..00000000 --- a/docs/models/UpdateLLMModelEndpointV1Request.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.update_llm_model_endpoint_v1_request.UpdateLLMModelEndpointV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateLLMModelEndpointV1Response.md b/docs/models/UpdateLLMModelEndpointV1Response.md deleted file mode 100644 index 
eb32cf92..00000000 --- a/docs/models/UpdateLLMModelEndpointV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.update_llm_model_endpoint_v1_response.UpdateLLMModelEndpointV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**endpoint_creation_task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateModelEndpointV1Request.md b/docs/models/UpdateModelEndpointV1Request.md deleted file mode 100644 index 40785371..00000000 --- a/docs/models/UpdateModelEndpointV1Request.md +++ /dev/null @@ -1,34 +0,0 @@ -# launch.api_client.model.update_model_endpoint_v1_request.UpdateModelEndpointV1Request - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**concurrent_requests_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, 
str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, 
bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateModelEndpointV1Response.md b/docs/models/UpdateModelEndpointV1Response.md deleted file mode 100644 index 52afaefe..00000000 --- a/docs/models/UpdateModelEndpointV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.update_model_endpoint_v1_response.UpdateModelEndpointV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**endpoint_creation_task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateSGLangModelEndpointRequest.md 
b/docs/models/UpdateSGLangModelEndpointRequest.md deleted file mode 100644 index 332f60b6..00000000 --- a/docs/models/UpdateSGLangModelEndpointRequest.md +++ /dev/null @@ -1,131 +0,0 @@ -# launch.api_client.model.update_sg_lang_model_endpoint_request.UpdateSGLangModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, 
tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, 
BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. 
| [optional] if omitted the server will use the default value of false -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | [optional] -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["sglang", ] if omitted the server will use the default value of sglang -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**force_bundle_recreation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false 
-**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. | [optional] if omitted the server will use the default value of false -**tp_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tensor parallel size. 
| [optional] -**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, skip init tokenizer and pass input_ids in generate request | [optional] -**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. | [optional] -**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. | [optional] -**kv_cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. \"auto\" will use model data type. | [optional] -**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the JSON file containing the KV cache scaling factors. | [optional] -**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The quantization method. 
| [optional] -**context_length** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model's maximum context length. | [optional] -**device** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The device type. | [optional] -**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Override the model name returned by the v1/models endpoint in OpenAI API server. | [optional] -**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The builtin chat template name or path of the chat template file. | [optional] -**is_embedding** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use a CausalLM as an embedding model. | [optional] -**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. 
| [optional] -**mem_fraction_static** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The fraction of the memory used for static allocation. | [optional] -**max_running_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of running requests. | [optional] -**max_total_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in the memory pool. | [optional] -**chunked_prefill_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in a chunk for the chunked prefill. | [optional] -**max_prefill_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in a prefill batch. | [optional] -**schedule_policy** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The scheduling policy of the requests. 
| [optional] -**schedule_conservativeness** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How conservative the schedule policy is. | [optional] -**cpu_offload_gb** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many GBs of RAM to reserve for CPU offloading | [optional] -**prefill_only_one_req** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, we only prefill one request at one prefill batch | [optional] -**stream_interval** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The interval for streaming in terms of the token length. | [optional] -**random_seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The random seed. 
| [optional] -**constrained_json_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex pattern for syntactic whitespaces allowed in JSON constrained output. | [optional] -**watchdog_timeout** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set watchdog timeout in seconds. | [optional] -**download_dir** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model download directory. | [optional] -**base_gpu_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The base GPU ID to start allocating GPUs from. | [optional] -**log_level** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The logging level of all loggers. | [optional] -**log_level_http** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The logging level of HTTP server. 
| [optional] -**log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Log the inputs and outputs of all requests. | [optional] -**show_time_cost** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Show time cost of custom marks. | [optional] -**enable_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable log prometheus metrics. | [optional] -**decode_log_interval** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The log interval of decode batch. | [optional] -**api_key** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set API key of the server. | [optional] -**file_storage_pth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the file storage in backend. 
| [optional] -**enable_cache_report** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Return number of cached tokens in usage.prompt_tokens_details. | [optional] -**data_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The data parallelism size. | [optional] -**load_balance_method** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The load balancing strategy for data parallelism. | [optional] -**expert_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The expert parallelism size. | [optional] -**dist_init_addr** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The host address for initializing distributed backend. | [optional] -**nnodes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of nodes. 
| [optional] -**node_rank** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The node rank. | [optional] -**json_model_override_args** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A dictionary in JSON string format used to override default model configurations. | [optional] -**lora_paths** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The list of LoRA adapters. | [optional] -**max_loras_per_batch** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of adapters for a running batch. | [optional] -**attention_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the kernels for attention layers. | [optional] -**sampling_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the kernels for sampling layers. 
| [optional] -**grammar_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the backend for grammar-guided decoding. | [optional] -**speculative_algorithm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Speculative algorithm. | [optional] -**speculative_draft_model_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the draft model weights. | [optional] -**speculative_num_steps** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of steps sampled from draft model in Speculative Decoding. | [optional] -**speculative_num_draft_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of token sampled from draft model in Speculative Decoding. 
| [optional] -**speculative_eagle_topk** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of token sampled from draft model in eagle2 each step. | [optional] -**enable_double_sparsity** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable double sparsity attention | [optional] -**ds_channel_config_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the double sparsity channel config | [optional] -**ds_heavy_channel_num** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of heavy channels in double sparsity attention | [optional] -**ds_heavy_token_num** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of heavy tokens in double sparsity attention | [optional] -**ds_heavy_channel_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of heavy 
channels in double sparsity attention | [optional] -**ds_sparse_decode_threshold** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The threshold for sparse decoding in double sparsity attention | [optional] -**disable_radix_cache** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable RadixAttention for prefix caching. | [optional] -**disable_jump_forward** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable jump-forward for grammar-guided decoding. | [optional] -**disable_cuda_graph** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable cuda graph. | [optional] -**disable_cuda_graph_padding** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable cuda graph when padding is needed. 
| [optional] -**disable_outlines_disk_cache** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable disk cache of outlines. | [optional] -**disable_custom_all_reduce** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable the custom all-reduce kernel. | [optional] -**disable_mla** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable Multi-head Latent Attention (MLA) for DeepSeek-V2. | [optional] -**disable_overlap_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable the overlap scheduler. | [optional] -**enable_mixed_chunk** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable mixing prefill and decode in a batch when using chunked prefill. 
| [optional] -**enable_dp_attention** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable data parallelism for attention and tensor parallelism for FFN. | [optional] -**enable_ep_moe** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable expert parallelism for moe. | [optional] -**enable_torch_compile** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Optimize the model with torch.compile. | [optional] -**torch_compile_max_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the maximum batch size when using torch compile. | [optional] -**cuda_graph_max_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the maximum batch size for cuda graph. | [optional] -**cuda_graph_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the list of batch sizes for cuda graph. 
| [optional] -**torchao_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Optimize the model with torchao. | [optional] -**enable_nan_detection** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable the NaN detection for debugging purposes. | [optional] -**enable_p2p_check** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable P2P check for GPU access. | [optional] -**triton_attention_reduce_in_fp32** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Cast the intermediate attention results to fp32. | [optional] -**triton_attention_num_kv_splits** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of KV splits in flash decoding Triton kernel. 
| [optional] -**num_continuous_decode_steps** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Run multiple continuous decoding steps to reduce scheduling overhead. | [optional] -**delete_ckpt_after_loading** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Delete the model checkpoint after loading the model. | [optional] -**enable_memory_saver** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Allow saving memory using release_memory_occupation and resume_memory_occupation | [optional] -**allow_auto_truncate** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Allow automatically truncating requests that exceed the maximum input length. | [optional] -**enable_custom_logit_processor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable users to pass custom logit processors to the server. 
| [optional] -**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specify the parser for handling tool-call interactions. | [optional] -**huggingface_repo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Hugging Face repository ID. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateTextGenerationInferenceModelEndpointRequest.md b/docs/models/UpdateTextGenerationInferenceModelEndpointRequest.md deleted file mode 100644 index 4e274db2..00000000 --- a/docs/models/UpdateTextGenerationInferenceModelEndpointRequest.md +++ /dev/null @@ -1,43 +0,0 @@ -# launch.api_client.model.update_text_generation_inference_model_endpoint_request.UpdateTextGenerationInferenceModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### 
Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] 
-**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. 
| [optional] if omitted the server will use the default value of false -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | [optional] -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["text_generation_inference", ] if omitted the server will use the default value of text_generation_inference -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**force_bundle_recreation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server 
will use the default value of false -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateTriggerV1Request.md b/docs/models/UpdateTriggerV1Request.md deleted file mode 100644 index f3fd0c9d..00000000 --- a/docs/models/UpdateTriggerV1Request.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.update_trigger_v1_request.UpdateTriggerV1Request - -## Model Type Info -Input Type | Accessed Type | 
Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**cron_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**suspend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateTriggerV1Response.md b/docs/models/UpdateTriggerV1Response.md deleted file mode 100644 index 9209cc28..00000000 --- a/docs/models/UpdateTriggerV1Response.md +++ /dev/null @@ -1,15 +0,0 @@ -# launch.api_client.model.update_trigger_v1_response.UpdateTriggerV1Response - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UpdateVLLMModelEndpointRequest.md b/docs/models/UpdateVLLMModelEndpointRequest.md deleted file mode 100644 index e1a43566..00000000 --- a/docs/models/UpdateVLLMModelEndpointRequest.md +++ /dev/null @@ -1,81 +0,0 @@ -# launch.api_client.model.update_vllm_model_endpoint_request.UpdateVLLMModelEndpointRequest - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | 
------------- | ------------- | ------------- -**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] -**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] -**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, 
bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true -**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false -**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | [optional] -**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["vllm", ] if omitted the server will use the default value of vllm -**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**force_bundle_recreation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false -**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**max_gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum GPU memory utilization for the batch inference. Default to 90%. Deprecated in favor of specifying this in VLLMModelConfig | [optional] -**attention_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Attention backend to use for vLLM. Default to None. | [optional] -**max_model_len** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model context length, If unspecified, will be automatically derived from the model config | [optional] -**max_num_seqs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of sequences per iteration | [optional] -**enforce_eager** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Always use eager-mode PyTorch. 
If False, will use eager mode and CUDA graph in hybrid for maximal perforamnce and flexibility | [optional] -**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. | [optional] if omitted the server will use the default value of false -**pipeline_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of pipeline stages. Default to None. | [optional] -**tensor_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tensor parallel replicas. Default to None. | [optional] -**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights. 
| [optional] -**disable_log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable logging requests. Default to None. | [optional] -**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] -**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call parser | [optional] -**enable_auto_tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable auto tool choice | [optional] -**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. * \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. * \"pt\" will load the weights in the pytorch bin format. * \"safetensors\" will load the weights in the safetensors format. 
* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading. * \"dummy\" will initialize the weights with random values, which is mainly for profiling. * \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information. * \"bitsandbytes\" will load the weights using bitsandbytes quantization. | [optional] -**config_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'. | [optional] -**tokenizer_mode** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokenizer mode. 'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`. | [optional] -**limit_mm_per_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of data instances per modality per prompt. Only applicable for multimodal models. 
| [optional] -**max_num_batched_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of batched tokens per iteration | [optional] -**tokenizer** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name or path of the huggingface tokenizer to use. | [optional] -**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models. | [optional] -**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Random seed for reproducibility. | [optional] -**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. 
| [optional] -**code_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**rope_scaling** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary containing the scaling configuration for the RoPE embeddings. When using this flag, don't update `max_position_embeddings` to the expected new maximum. | [optional] -**tokenizer_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] -**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm. 
| [optional] -**max_seq_len_to_capture** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode. | [optional] -**disable_sliding_window** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored. | [optional] -**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, skip initialization of tokenizer and detokenizer. | [optional] -**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. If not specified, the model name will be the same as `model`. 
| [optional] -**override_neuron_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. | [optional] -**mm_processor_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. | [optional] -**block_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of a cache block in number of tokens. | [optional] -**gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Fraction of GPU memory to use for the vLLM execution. | [optional] -**swap_space** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of the CPU swap space per GPU (in GiB). 
| [optional] -**cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. | [optional] -**num_gpu_blocks_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. | [optional] -**enable_prefix_caching** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enables automatic prefix caching. | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UploadFileResponse.md b/docs/models/UploadFileResponse.md deleted file mode 100644 index 3dd7eefc..00000000 --- a/docs/models/UploadFileResponse.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.upload_file_response.UploadFileResponse - -Response object for uploading a file. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for uploading a file. | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the uploaded file. | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UrlCitation.md b/docs/models/UrlCitation.md deleted file mode 100644 index b869f9e1..00000000 --- a/docs/models/UrlCitation.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.url_citation.UrlCitation - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### 
Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**start_index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The index of the first character of the URL citation in the message. | -**end_index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The index of the last character of the URL citation in the message. | -**title** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The title of the web resource. | -**url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The URL of the web resource. 
| -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/UserLocation.md b/docs/models/UserLocation.md deleted file mode 100644 index f0323db4..00000000 --- a/docs/models/UserLocation.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.user_location.UserLocation - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**approximate** | [**WebSearchLocation**](WebSearchLocation.md) | [**WebSearchLocation**](WebSearchLocation.md) | | -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of location approximation. Always `approximate`. 
| must be one of ["approximate", ] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ValidationError.md b/docs/models/ValidationError.md deleted file mode 100644 index 1d560469..00000000 --- a/docs/models/ValidationError.md +++ /dev/null @@ -1,17 +0,0 @@ -# launch.api_client.model.validation_error.ValidationError - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**msg** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**loc** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/VoiceIdsShared.md b/docs/models/VoiceIdsShared.md deleted file mode 100644 index b0a19a72..00000000 --- a/docs/models/VoiceIdsShared.md +++ /dev/null @@ -1,9 +0,0 @@ -# launch.api_client.model.voice_ids_shared.VoiceIdsShared - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/WebSearchContextSize.md b/docs/models/WebSearchContextSize.md deleted file mode 100644 index c19ce47b..00000000 --- a/docs/models/WebSearchContextSize.md +++ /dev/null @@ -1,11 +0,0 @@ -# launch.api_client.model.web_search_context_size.WebSearchContextSize - -High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. `medium` is the default. 
- -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. `medium` is the default. | - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/WebSearchLocation.md b/docs/models/WebSearchLocation.md deleted file mode 100644 index 2ea05b5b..00000000 --- a/docs/models/WebSearchLocation.md +++ /dev/null @@ -1,18 +0,0 @@ -# launch.api_client.model.web_search_location.WebSearchLocation - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**country** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user, e.g. `US`. 
| [optional] -**region** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Free text input for the region of the user, e.g. `California`. | [optional] -**city** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Free text input for the city of the user, e.g. `San Francisco`. | [optional] -**timezone** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g. `America/Los_Angeles`. 
| [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/WebSearchOptions.md b/docs/models/WebSearchOptions.md deleted file mode 100644 index 9e79c6be..00000000 --- a/docs/models/WebSearchOptions.md +++ /dev/null @@ -1,16 +0,0 @@ -# launch.api_client.model.web_search_options.WebSearchOptions - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**user_location** | [**UserLocation**](UserLocation.md) | [**UserLocation**](UserLocation.md) | Approximate location parameters for the search. 
| [optional] -**search_context_size** | [**WebSearchContextSize**](WebSearchContextSize.md) | [**WebSearchContextSize**](WebSearchContextSize.md) | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/models/ZipArtifactFlavor.md b/docs/models/ZipArtifactFlavor.md deleted file mode 100644 index f263d10f..00000000 --- a/docs/models/ZipArtifactFlavor.md +++ /dev/null @@ -1,23 +0,0 @@ -# launch.api_client.model.zip_artifact_flavor.ZipArtifactFlavor - -This is the entity-layer class for the Model Bundle flavor of a zip artifact. - -## Model Type Info -Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- -dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the Model Bundle flavor of a zip artifact. 
| - -### Dictionary Keys -Key | Input Type | Accessed Type | Description | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["zip_artifact", ] -**requirements** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**load_model_fn_module_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**load_predict_fn_module_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | -**app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, 
str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] -**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] - -[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) - diff --git a/docs/plugins.py b/docs/plugins.py deleted file mode 100644 index aa67a03e..00000000 --- a/docs/plugins.py +++ /dev/null @@ -1,58 +0,0 @@ -import logging -import os -import re - -from mkdocs.config import Config -from mkdocs.structure.files import Files -from mkdocs.structure.pages import Page - -logger = logging.getLogger("mkdocs.plugin") - - -def on_pre_build(config: Config): - """ - Not doing anything here anymore. - """ - pass - - -def on_files(files: Files, config: Config) -> Files: - return remove_files(files) - - -def remove_files(files: Files) -> Files: - to_remove = [] - for file in files: - if file.src_path in {"plugins.py", "cli_help.txt"}: - to_remove.append(file) - elif file.src_path.startswith("__pycache__/"): - to_remove.append(file) - - logger.debug("removing files: %s", [f.src_path for f in to_remove]) - for f in to_remove: - files.remove(f) - - return files - - -def on_page_markdown(markdown: str, page: Page, config: Config, files: Files) -> str: - return reinstate_code_titles(markdown) - - -def reinstate_code_titles(markdown: str) -> str: - """ - Fix titles in code blocks, see https://youtrack.jetbrains.com/issue/PY-53246. 
- """ - return re.sub(r"^(```py)\s*\ntitle=", r"\1 title=", markdown, flags=re.M) - - -def add_version(markdown: str, page: Page) -> str: - if page.abs_url == "/": - version_ref = os.getenv("GITHUB_REF") - if version_ref: - version = re.sub("^refs/tags/", "", version_ref.lower()) - version_str = f"Documentation for version: **{version}**" - else: - version_str = "Documentation for development version" - markdown = re.sub(r"{{ *version *}}", version_str, markdown) - return markdown diff --git a/guides/custom_docker_images/index.html b/guides/custom_docker_images/index.html new file mode 100644 index 00000000..5f40bd7d --- /dev/null +++ b/guides/custom_docker_images/index.html @@ -0,0 +1,838 @@ + + + + + + + + + + + + + + + + + + + + + + + + Custom docker images - Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + +

Custom docker images

+
+

Warning

+

This feature is currently in beta, and the API is likely to change. Please contact us if you are interested +in using this feature.

+
+

If you need more customization that what cloudpickle or zip artifacts can offer, or if you just already have a pre-built +docker image, then you can create a Model Bundle with that docker image. You will need to modify your image to run a +web server that exposes HTTP port 5005.

+

In our example below, we assume that you have some existing Python function my_inference_fn that can be imported. +If you need to invoke some other binary (e.g. a custom C++ binary), then you can shell out to the OS to call that binary; +subsequent versions of this document will have native examples for non-Python binaries.

+

For choice of web server, we recommend FastAPI due to its speed and ergonomics. +Any web server would work, although we give examples with FastAPI.

+

Step 1: Install Requirements

+

You can add fastapi and uvicorn to the requirements.txt file that gets installed as part of your Dockerfile. Alternatively, +you can add pip install fastapi uvicorn to the Dockerfile directly.

+

Step 2: Set up a web server application

+

Inside your project workspace, create a server.py file with these contents:

+
# test='skip'
+from fastapi import FastAPI
+
+from pydantic import BaseModel
+
+app = FastAPI()
+
+class MyRequestSchema(BaseModel):
+    url: str
+
+
+class MyResponseSchema(BaseModel):
+    response: str
+
+def my_inference_fn(req: MyRequestSchema) -> MyResponseSchema:
+    # This is an example inference function - you can instead import a function from your own codebase,
+    # or shell out to the OS, etc.
+    resp = req.url + "_hello"
+    return MyResponseSchema(response=resp)
+
+@app.post("/predict")
+async def predict(request: MyRequestSchema) -> MyResponseSchema:
+    response = my_inference_fn(request)
+    return response
+
+@app.get("/readyz")
+def readyz():
+    return "ok"
+
+

Step 3: Rebuild and push your image

+

Build your updated Dockerfile and push the image to a location that is accessible by Scale. For instance, if you are +using AWS ECR, please make sure that the necessary cross-account permissions allow Scale to pull your docker image.

+

Step 4: Deploy!

+

Now you can upload your docker image as a Model Bundle, and then create a Model Endpoint referencing that Model Bundle. Note that path.to.your.server.file:app in the command section below should be relative to the WORKDIR of your docker image.

+
# test='skip'
+import os
+
+from launch import LaunchClient
+
+from server import MyRequestSchema, MyResponseSchema  # Defined as part of your server.py
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+
+model_bundle_name = "my_bundle_name"
+
+client.create_model_bundle_from_runnable_image_v2(
+    model_bundle_name=model_bundle_name,
+    request_schema=MyRequestSchema,
+    response_schema=MyResponseSchema,
+    repository="$YOUR_ECR_REPO",
+    tag="$YOUR_IMAGE_TAG",
+    command=[
+        "dumb-init",
+        "--",
+        "uvicorn",
+        "path.to.your.server.file:app",
+        "--port",
+        "5005",
+        "--host",
+        "::",
+    ],
+    predict_route="/predict",
+    healthcheck_route="/readyz",
+    readiness_initial_delay_seconds=120,
+    env={},
+)
+
+client.create_model_endpoint(
+    endpoint_name=f"endpoint-{model_bundle_name}",
+    model_bundle=model_bundle_name,
+    endpoint_type="async",
+    min_workers=0,
+    max_workers=1,
+    per_worker=1,
+    memory="30Gi",
+    storage="40Gi",
+    cpus=4, # This must  be at least 2 because forwarding services consume 1 cpu.
+    gpus=1,
+    gpu_type="nvidia-ampere-a10",
+    update_if_exists=True,
+)
+
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 00000000..47ea7319 --- /dev/null +++ b/index.html @@ -0,0 +1,820 @@ + + + + + + + + + + + + + + + + + + + + + + + + Launch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Scale Launch

+

CI +pypi

+

Simple, scalable, and high performance ML service deployment in python.

+

Example

+
Launch Usage
import os
+import time
+from launch import LaunchClient
+from launch import EndpointRequest
+from pydantic import BaseModel
+from rich import print
+
+
+class MyRequestSchema(BaseModel):
+    x: int
+    y: str
+
+class MyResponseSchema(BaseModel):
+    __root__: int
+
+
+def my_load_predict_fn(model):
+    def returns_model_of_x_plus_len_of_y(x: int, y: str) -> int:
+        """MyRequestSchema -> MyResponseSchema"""
+        assert isinstance(x, int) and isinstance(y, str)
+        return model(x) + len(y)
+
+    return returns_model_of_x_plus_len_of_y
+
+
+def my_load_model_fn():
+    def my_model(x):
+        return x * 2
+
+    return my_model
+
+BUNDLE_PARAMS = {
+    "model_bundle_name": "test-bundle",
+    "load_predict_fn": my_load_predict_fn,
+    "load_model_fn": my_load_model_fn,
+    "request_schema": MyRequestSchema,
+    "response_schema": MyResponseSchema,
+    "requirements": ["pytest==7.2.1", "numpy"],  # list your requirements here
+    "pytorch_image_tag": "1.7.1-cuda11.0-cudnn8-runtime",
+}
+
+ENDPOINT_PARAMS = {
+    "endpoint_name": "demo-endpoint",
+    "model_bundle": "test-bundle",
+    "cpus": 1,
+    "min_workers": 0,
+    "endpoint_type": "async",
+    "update_if_exists": True,
+    "labels": {
+        "team": "MY_TEAM",
+        "product": "launch",
+    }
+}
+
+def predict_on_endpoint(request: MyRequestSchema) -> MyResponseSchema:
+    # Wait for the endpoint to be ready first before submitting a task
+    endpoint = client.get_model_endpoint(endpoint_name="demo-endpoint")
+    while endpoint.status() != "READY":
+        time.sleep(10)
+
+    endpoint_request = EndpointRequest(args=request.dict(), return_pickled=False)
+
+    future = endpoint.predict(request=endpoint_request)
+    raw_response = future.get()
+
+    response = MyResponseSchema.parse_raw(raw_response.result)
+    return response
+
+
+client = LaunchClient(api_key=os.getenv("LAUNCH_API_KEY"))
+
+client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS)
+endpoint = client.create_model_endpoint(**ENDPOINT_PARAMS)
+
+request = MyRequestSchema(x=5, y="hello")
+response = predict_on_endpoint(request)
+print(response)
+"""
+MyResponseSchema(__root__=10)
+"""
+
+

What's going on here:

+
    +
  • First we use pydantic to define our request and response + schemas, MyRequestSchema and MyResponseSchema. These schemas are used to generate the API + documentation for our models.
  • +
  • Next we define the the model and the load_predict_fn, which tells Launch + how to load our model and how to make predictions with it. In this case, + we're just returning a function that adds the length of the string y to + model(x), where model doubles the integer x.
  • +
  • We then define the model bundle by specifying the load_predict_fn, the request_schema, and the + response_schema. We also specify the env_params, which tell Launch environment settings like + the base image to use. In this case, we're using a PyTorch image.
  • +
  • Next, we create the model endpoint, which is the API that we'll use to make predictions. We + specify the model_bundle that we created above, and we specify the endpoint_type, which tells + Launch whether to use a synchronous or asynchronous endpoint. In this case, we're using an + asynchronous endpoint, which means that we can make predictions and return immediately with a + future object. We can then use the future object to get the prediction result later.
  • +
  • Finally, we make a prediction by calling predict_on_endpoint with a MyRequestSchema object. + This function first waits for the endpoint to be ready, then it submits a prediction request to + the endpoint. It then waits for the prediction result and returns it.
  • +
+

Notice that we specified min_workers=0, meaning that the endpoint will scale down to 0 workers +when it's not being used.

+

Installation

+

To use Scale Launch, first install it using pip:

+
Installation
pip install -U scale-launch
+
+ + + + + + +
+
+ + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/justfile b/justfile deleted file mode 100644 index 080e225b..00000000 --- a/justfile +++ /dev/null @@ -1,41 +0,0 @@ -# Client regeneration from llm-engine OpenAPI schema - -# Configuration -llm_engine_repo := "scaleapi/llm-engine" -default_branch := "main" -schema_path := "model-engine/specs/openapi-3.0.json" -generator_version := "6.4.0" - -# Fetch the OpenAPI schema from llm-engine repo -fetch-schema branch=default_branch: - @echo "Fetching OpenAPI 3.0 schema from {{llm_engine_repo}} (branch: {{branch}})..." - curl -sSL "https://raw.githubusercontent.com/{{llm_engine_repo}}/{{branch}}/{{schema_path}}" -o openapi.json - @echo "Schema saved to openapi.json" - -# Generate client code from openapi.json -generate: - #!/usr/bin/env bash - set -euo pipefail - if [ ! -f openapi.json ]; then - echo "Error: openapi.json not found. Run 'just fetch-schema' first." - exit 1 - fi - echo "Generating client with OpenAPI Generator {{generator_version}}..." - docker run --rm \ - -v "$(pwd):/local" \ - openapitools/openapi-generator-cli:v{{generator_version}} generate \ - -i /local/openapi.json \ - -g python \ - -o /local \ - --package-name launch.api_client \ - --additional-properties=generateSourceCodeOnly=true - echo "Client generated. 
Review changes with 'git diff'" - -# Fetch schema and regenerate client -regenerate branch=default_branch: (fetch-schema branch) generate - -# Show current schema source info -info: - @echo "Schema source: https://github.com/{{llm_engine_repo}}/blob/{{default_branch}}/{{schema_path}}" - @echo "Generator version: {{generator_version}}" - @test -f openapi.json && echo "Local schema: openapi.json (exists)" || echo "Local schema: openapi.json (not found)" diff --git a/launch/README.md b/launch/README.md deleted file mode 100644 index 26989fb0..00000000 --- a/launch/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Scale Launch - -Currently, Scale Launch is still being built out, so the contents of this library are subject to change. diff --git a/launch/__init__.py b/launch/__init__.py deleted file mode 100644 index 4bbcff12..00000000 --- a/launch/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -""" - -Scale Launch provides ML engineers with a simple Python interface for turning a local code snippet -into a production service that automatically scales according to traffic. 
- - -""" -# pylint: disable=C0413 - -import warnings -from typing import Sequence - -import pkg_resources -import pydantic - -if pydantic.VERSION.startswith("2."): - # HACK: Suppress warning from pydantic v2 about protected namespace, this is due to - # launch-python-client module is based on v1 and only does minimum to support forward compatibility - warnings.filterwarnings("ignore", category=UserWarning, module="pydantic") - -from .client import LaunchClient -from .connection import Connection -from .hooks import PostInferenceHooks -from .model_bundle import ModelBundle -from .model_endpoint import ( - AsyncEndpoint, - AsyncEndpointBatchResponse, - EndpointRequest, - EndpointResponse, - EndpointResponseFuture, - EndpointResponseStream, - SyncEndpoint, -) - -__version__ = pkg_resources.get_distribution("scale-launch").version -__all__: Sequence[str] = [ - "AsyncEndpoint", - "AsyncEndpointBatchResponse", - "Connection", - "EndpointRequest", - "EndpointResponse", - "EndpointResponseFuture", - "EndpointResponseStream", - "LaunchClient", - "ModelBundle", - "PostInferenceHooks", - "SyncEndpoint", -] diff --git a/launch/api_client/__init__.py b/launch/api_client/__init__.py deleted file mode 100644 index 7c58d56a..00000000 --- a/launch/api_client/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# coding: utf-8 - -# flake8: noqa - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -__version__ = "1.0.0" - -# import ApiClient -from launch.api_client.api_client import ApiClient - -# import Configuration -from launch.api_client.configuration import Configuration - -# import exceptions -from launch.api_client.exceptions import ( - ApiAttributeError, - ApiException, - ApiKeyError, - ApiTypeError, - ApiValueError, - OpenApiException, -) diff --git a/launch/api_client/api/__init__.py 
b/launch/api_client/api/__init__.py deleted file mode 100644 index 50fa1c77..00000000 --- a/launch/api_client/api/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# flake8: noqa - -# import apis into api package -from launch.api_client.api.default_api import DefaultApi diff --git a/launch/api_client/api/default_api.py b/launch/api_client/api/default_api.py deleted file mode 100644 index 3d6ae232..00000000 --- a/launch/api_client/api/default_api.py +++ /dev/null @@ -1,13814 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - -import warnings -from typing import Any, Dict, List, Optional, Tuple, Union - -from pydantic import ( - Field, - StrictBytes, - StrictFloat, - StrictInt, - StrictStr, - validate_call, -) -from typing_extensions import Annotated - -from launch.api_client.api_client import ApiClient, RequestSerialized -from launch.api_client.api_response import ApiResponse -from launch.api_client.models.cancel_fine_tune_response import ( - CancelFineTuneResponse, -) -from launch.api_client.models.clone_model_bundle_v1_request import ( - CloneModelBundleV1Request, -) -from launch.api_client.models.clone_model_bundle_v2_request import ( - CloneModelBundleV2Request, -) -from launch.api_client.models.completion_stream_v1_request import ( - CompletionStreamV1Request, -) -from launch.api_client.models.completion_stream_v1_response import ( - CompletionStreamV1Response, -) -from launch.api_client.models.completion_sync_v1_request import ( - CompletionSyncV1Request, -) -from launch.api_client.models.completion_sync_v1_response import ( - CompletionSyncV1Response, -) -from launch.api_client.models.create_async_task_v1_response import ( - CreateAsyncTaskV1Response, -) -from 
launch.api_client.models.create_batch_completions_request import ( - CreateBatchCompletionsRequest, -) -from launch.api_client.models.create_batch_completions_response import ( - CreateBatchCompletionsResponse, -) -from launch.api_client.models.create_batch_job_v1_request import ( - CreateBatchJobV1Request, -) -from launch.api_client.models.create_batch_job_v1_response import ( - CreateBatchJobV1Response, -) -from launch.api_client.models.create_docker_image_batch_job_bundle_v1_request import ( - CreateDockerImageBatchJobBundleV1Request, -) -from launch.api_client.models.create_docker_image_batch_job_bundle_v1_response import ( - CreateDockerImageBatchJobBundleV1Response, -) -from launch.api_client.models.create_docker_image_batch_job_v1_request import ( - CreateDockerImageBatchJobV1Request, -) -from launch.api_client.models.create_docker_image_batch_job_v1_response import ( - CreateDockerImageBatchJobV1Response, -) -from launch.api_client.models.create_fine_tune_request import ( - CreateFineTuneRequest, -) -from launch.api_client.models.create_fine_tune_response import ( - CreateFineTuneResponse, -) -from launch.api_client.models.create_llm_model_endpoint_v1_request import ( - CreateLLMModelEndpointV1Request, -) -from launch.api_client.models.create_llm_model_endpoint_v1_response import ( - CreateLLMModelEndpointV1Response, -) -from launch.api_client.models.create_model_bundle_v1_request import ( - CreateModelBundleV1Request, -) -from launch.api_client.models.create_model_bundle_v1_response import ( - CreateModelBundleV1Response, -) -from launch.api_client.models.create_model_bundle_v2_request import ( - CreateModelBundleV2Request, -) -from launch.api_client.models.create_model_bundle_v2_response import ( - CreateModelBundleV2Response, -) -from launch.api_client.models.create_model_endpoint_v1_request import ( - CreateModelEndpointV1Request, -) -from launch.api_client.models.create_model_endpoint_v1_response import ( - CreateModelEndpointV1Response, -) -from 
launch.api_client.models.create_trigger_v1_request import ( - CreateTriggerV1Request, -) -from launch.api_client.models.create_trigger_v1_response import ( - CreateTriggerV1Response, -) -from launch.api_client.models.delete_file_response import DeleteFileResponse -from launch.api_client.models.delete_llm_endpoint_response import ( - DeleteLLMEndpointResponse, -) -from launch.api_client.models.delete_model_endpoint_v1_response import ( - DeleteModelEndpointV1Response, -) -from launch.api_client.models.delete_trigger_v1_response import ( - DeleteTriggerV1Response, -) -from launch.api_client.models.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) -from launch.api_client.models.endpoint_predict_v1_request import ( - EndpointPredictV1Request, -) -from launch.api_client.models.get_async_task_v1_response import ( - GetAsyncTaskV1Response, -) -from launch.api_client.models.get_batch_job_v1_response import ( - GetBatchJobV1Response, -) -from launch.api_client.models.get_docker_image_batch_job_v1_response import ( - GetDockerImageBatchJobV1Response, -) -from launch.api_client.models.get_file_content_response import ( - GetFileContentResponse, -) -from launch.api_client.models.get_file_response import GetFileResponse -from launch.api_client.models.get_fine_tune_events_response import ( - GetFineTuneEventsResponse, -) -from launch.api_client.models.get_fine_tune_response import GetFineTuneResponse -from launch.api_client.models.get_llm_model_endpoint_v1_response import ( - GetLLMModelEndpointV1Response, -) -from launch.api_client.models.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) -from launch.api_client.models.get_trigger_v1_response import ( - GetTriggerV1Response, -) -from launch.api_client.models.list_docker_image_batch_job_bundle_v1_response import ( - ListDockerImageBatchJobBundleV1Response, -) -from launch.api_client.models.list_docker_image_batch_jobs_v1_response import ( - 
ListDockerImageBatchJobsV1Response, -) -from launch.api_client.models.list_files_response import ListFilesResponse -from launch.api_client.models.list_fine_tunes_response import ( - ListFineTunesResponse, -) -from launch.api_client.models.list_llm_model_endpoints_v1_response import ( - ListLLMModelEndpointsV1Response, -) -from launch.api_client.models.list_model_bundles_v1_response import ( - ListModelBundlesV1Response, -) -from launch.api_client.models.list_model_bundles_v2_response import ( - ListModelBundlesV2Response, -) -from launch.api_client.models.list_model_endpoints_v1_response import ( - ListModelEndpointsV1Response, -) -from launch.api_client.models.list_triggers_v1_response import ( - ListTriggersV1Response, -) -from launch.api_client.models.model_bundle_order_by import ModelBundleOrderBy -from launch.api_client.models.model_bundle_v1_response import ( - ModelBundleV1Response, -) -from launch.api_client.models.model_bundle_v2_response import ( - ModelBundleV2Response, -) -from launch.api_client.models.model_download_request import ( - ModelDownloadRequest, -) -from launch.api_client.models.model_download_response import ( - ModelDownloadResponse, -) -from launch.api_client.models.model_endpoint_order_by import ( - ModelEndpointOrderBy, -) -from launch.api_client.models.sync_endpoint_predict_v1_request import ( - SyncEndpointPredictV1Request, -) -from launch.api_client.models.sync_endpoint_predict_v1_response import ( - SyncEndpointPredictV1Response, -) -from launch.api_client.models.update_batch_job_v1_request import ( - UpdateBatchJobV1Request, -) -from launch.api_client.models.update_batch_job_v1_response import ( - UpdateBatchJobV1Response, -) -from launch.api_client.models.update_docker_image_batch_job_v1_request import ( - UpdateDockerImageBatchJobV1Request, -) -from launch.api_client.models.update_docker_image_batch_job_v1_response import ( - UpdateDockerImageBatchJobV1Response, -) -from 
launch.api_client.models.update_llm_model_endpoint_v1_request import ( - UpdateLLMModelEndpointV1Request, -) -from launch.api_client.models.update_llm_model_endpoint_v1_response import ( - UpdateLLMModelEndpointV1Response, -) -from launch.api_client.models.update_model_endpoint_v1_request import ( - UpdateModelEndpointV1Request, -) -from launch.api_client.models.update_model_endpoint_v1_response import ( - UpdateModelEndpointV1Response, -) -from launch.api_client.models.update_trigger_v1_request import ( - UpdateTriggerV1Request, -) -from launch.api_client.models.update_trigger_v1_response import ( - UpdateTriggerV1Response, -) -from launch.api_client.models.upload_file_response import UploadFileResponse -from launch.api_client.rest import RESTResponseType - - -class DefaultApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( - self, - fine_tune_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CancelFineTuneResponse: - """Cancel Fine Tune - - - :param fine_tune_id: (required) - :type fine_tune_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_serialize( - fine_tune_id=fine_tune_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CancelFineTuneResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_with_http_info( - self, - fine_tune_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CancelFineTuneResponse]: - """Cancel Fine Tune - - - :param fine_tune_id: (required) - 
:type fine_tune_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_serialize( - fine_tune_id=fine_tune_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CancelFineTuneResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_without_preload_content( - self, - fine_tune_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - 
_headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Cancel Fine Tune - - - :param fine_tune_id: (required) - :type fine_tune_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_serialize( - fine_tune_id=fine_tune_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CancelFineTuneResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_serialize( - self, - fine_tune_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if fine_tune_id is not None: - _path_params["fine_tune_id"] = fine_tune_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="PUT", - resource_path="/v1/llm/fine-tunes/{fine_tune_id}/cancel", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - clone_model_bundle_v1_request: 
CloneModelBundleV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateModelBundleV1Response: - """Clone Model Bundle With Changes - - Creates a ModelBundle by cloning an existing one and then applying changes on top. - - :param clone_model_bundle_v1_request: (required) - :type clone_model_bundle_v1_request: CloneModelBundleV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_serialize( - clone_model_bundle_v1_request=clone_model_bundle_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_with_http_info( - self, - clone_model_bundle_v1_request: CloneModelBundleV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateModelBundleV1Response]: - """Clone Model Bundle With Changes - - Creates a ModelBundle by cloning an existing one and then applying changes on top. - - :param clone_model_bundle_v1_request: (required) - :type clone_model_bundle_v1_request: CloneModelBundleV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_serialize( - clone_model_bundle_v1_request=clone_model_bundle_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_without_preload_content( - self, - clone_model_bundle_v1_request: CloneModelBundleV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Clone Model Bundle With Changes - - Creates a ModelBundle by cloning an existing one and then applying changes on top. 
- - :param clone_model_bundle_v1_request: (required) - :type clone_model_bundle_v1_request: CloneModelBundleV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_serialize( - clone_model_bundle_v1_request=clone_model_bundle_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_serialize( - self, - clone_model_bundle_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if clone_model_bundle_v1_request is not None: - _body_params = clone_model_bundle_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/model-bundles/clone-with-changes", - 
path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - clone_model_bundle_v2_request: CloneModelBundleV2Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateModelBundleV2Response: - """Clone Model Bundle With Changes - - Creates a ModelBundle by cloning an existing one and then applying changes on top. - - :param clone_model_bundle_v2_request: (required) - :type clone_model_bundle_v2_request: CloneModelBundleV2Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_serialize( - clone_model_bundle_v2_request=clone_model_bundle_v2_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_with_http_info( - self, - clone_model_bundle_v2_request: CloneModelBundleV2Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateModelBundleV2Response]: - """Clone Model Bundle With Changes - - Creates a ModelBundle by cloning an existing one and then applying changes on top. - - :param clone_model_bundle_v2_request: (required) - :type clone_model_bundle_v2_request: CloneModelBundleV2Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_serialize( - clone_model_bundle_v2_request=clone_model_bundle_v2_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_without_preload_content( - self, - clone_model_bundle_v2_request: CloneModelBundleV2Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) 
-> RESTResponseType: - """Clone Model Bundle With Changes - - Creates a ModelBundle by cloning an existing one and then applying changes on top. - - :param clone_model_bundle_v2_request: (required) - :type clone_model_bundle_v2_request: CloneModelBundleV2Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_serialize( - clone_model_bundle_v2_request=clone_model_bundle_v2_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_serialize( - self, - clone_model_bundle_v2_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if clone_model_bundle_v2_request is not None: - _body_params = clone_model_bundle_v2_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v2/model-bundles/clone-with-changes", - 
path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_async_inference_task_v1_async_tasks_post( - self, - model_endpoint_id: StrictStr, - endpoint_predict_v1_request: EndpointPredictV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateAsyncTaskV1Response: - """Create Async Inference Task - - Runs an async inference prediction. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param endpoint_predict_v1_request: (required) - :type endpoint_predict_v1_request: EndpointPredictV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_async_inference_task_v1_async_tasks_post_serialize( - model_endpoint_id=model_endpoint_id, - endpoint_predict_v1_request=endpoint_predict_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateAsyncTaskV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_async_inference_task_v1_async_tasks_post_with_http_info( - self, - model_endpoint_id: StrictStr, - endpoint_predict_v1_request: EndpointPredictV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateAsyncTaskV1Response]: - """Create Async Inference Task - - Runs an async inference prediction. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param endpoint_predict_v1_request: (required) - :type endpoint_predict_v1_request: EndpointPredictV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_async_inference_task_v1_async_tasks_post_serialize( - model_endpoint_id=model_endpoint_id, - endpoint_predict_v1_request=endpoint_predict_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateAsyncTaskV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_async_inference_task_v1_async_tasks_post_without_preload_content( - self, - model_endpoint_id: StrictStr, - endpoint_predict_v1_request: EndpointPredictV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, 
le=0)] = 0, - ) -> RESTResponseType: - """Create Async Inference Task - - Runs an async inference prediction. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param endpoint_predict_v1_request: (required) - :type endpoint_predict_v1_request: EndpointPredictV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_async_inference_task_v1_async_tasks_post_serialize( - model_endpoint_id=model_endpoint_id, - endpoint_predict_v1_request=endpoint_predict_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateAsyncTaskV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_async_inference_task_v1_async_tasks_post_serialize( - self, - model_endpoint_id, - endpoint_predict_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model_endpoint_id is not None: - _query_params.append(("model_endpoint_id", model_endpoint_id)) - - # process the header parameters - # process the form parameters - # process the body parameter - if endpoint_predict_v1_request is not None: - _body_params = endpoint_predict_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return 
self.api_client.param_serialize( - method="POST", - resource_path="/v1/async-tasks", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_batch_completions_v1_llm_batch_completions_post( - self, - create_batch_completions_request: CreateBatchCompletionsRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateBatchCompletionsResponse: - """Create Batch Completions - - - :param create_batch_completions_request: (required) - :type create_batch_completions_request: CreateBatchCompletionsRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_batch_completions_v1_llm_batch_completions_post_serialize( - create_batch_completions_request=create_batch_completions_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateBatchCompletionsResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_batch_completions_v1_llm_batch_completions_post_with_http_info( - self, - create_batch_completions_request: CreateBatchCompletionsRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateBatchCompletionsResponse]: - """Create Batch Completions - - - :param create_batch_completions_request: (required) - :type create_batch_completions_request: CreateBatchCompletionsRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_batch_completions_v1_llm_batch_completions_post_serialize( - create_batch_completions_request=create_batch_completions_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateBatchCompletionsResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_batch_completions_v1_llm_batch_completions_post_without_preload_content( - self, - create_batch_completions_request: CreateBatchCompletionsRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Batch Completions - - - :param create_batch_completions_request: (required) - :type create_batch_completions_request: CreateBatchCompletionsRequest - :param _request_timeout: timeout setting for this request. 
If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_batch_completions_v1_llm_batch_completions_post_serialize( - create_batch_completions_request=create_batch_completions_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateBatchCompletionsResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_batch_completions_v1_llm_batch_completions_post_serialize( - self, - create_batch_completions_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # 
process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if create_batch_completions_request is not None: - _body_params = create_batch_completions_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/llm/batch-completions", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_batch_job_v1_batch_jobs_post( - self, - create_batch_job_v1_request: CreateBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateBatchJobV1Response: - """Create Batch Job - - Runs a batch job. - - :param create_batch_job_v1_request: (required) - :type create_batch_job_v1_request: CreateBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_batch_job_v1_batch_jobs_post_serialize( - create_batch_job_v1_request=create_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_batch_job_v1_batch_jobs_post_with_http_info( - self, - create_batch_job_v1_request: CreateBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateBatchJobV1Response]: - """Create Batch Job - - Runs a batch job. 
- - :param create_batch_job_v1_request: (required) - :type create_batch_job_v1_request: CreateBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_batch_job_v1_batch_jobs_post_serialize( - create_batch_job_v1_request=create_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_batch_job_v1_batch_jobs_post_without_preload_content( - self, - create_batch_job_v1_request: CreateBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Batch Job - - Runs a batch job. - - :param create_batch_job_v1_request: (required) - :type create_batch_job_v1_request: CreateBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_batch_job_v1_batch_jobs_post_serialize( - create_batch_job_v1_request=create_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_batch_job_v1_batch_jobs_post_serialize( - self, - create_batch_job_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if create_batch_job_v1_request is not None: - _body_params = create_batch_job_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = 
self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/batch-jobs", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - model_endpoint_name: StrictStr, - completion_stream_v1_request: CompletionStreamV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CompletionStreamV1Response: - """Create Completion Stream Task - - Runs a stream prompt completion on an LLM. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param completion_stream_v1_request: (required) - :type completion_stream_v1_request: CompletionStreamV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_completion_stream_task_v1_llm_completions_stream_post_serialize( - model_endpoint_name=model_endpoint_name, - completion_stream_v1_request=completion_stream_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CompletionStreamV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_completion_stream_task_v1_llm_completions_stream_post_with_http_info( - self, - model_endpoint_name: StrictStr, - completion_stream_v1_request: CompletionStreamV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CompletionStreamV1Response]: - """Create Completion Stream Task - - Runs a stream prompt completion on an LLM. 
- - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param completion_stream_v1_request: (required) - :type completion_stream_v1_request: CompletionStreamV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_completion_stream_task_v1_llm_completions_stream_post_serialize( - model_endpoint_name=model_endpoint_name, - completion_stream_v1_request=completion_stream_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CompletionStreamV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_completion_stream_task_v1_llm_completions_stream_post_without_preload_content( - self, - model_endpoint_name: StrictStr, - completion_stream_v1_request: CompletionStreamV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Completion Stream Task - - Runs a stream prompt completion on an LLM. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param completion_stream_v1_request: (required) - :type completion_stream_v1_request: CompletionStreamV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_completion_stream_task_v1_llm_completions_stream_post_serialize( - model_endpoint_name=model_endpoint_name, - completion_stream_v1_request=completion_stream_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CompletionStreamV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_completion_stream_task_v1_llm_completions_stream_post_serialize( - self, - model_endpoint_name, - completion_stream_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model_endpoint_name is not None: - _query_params.append(("model_endpoint_name", model_endpoint_name)) - - # process the header parameters - # process the form parameters - # process the body parameter - if completion_stream_v1_request is not None: - 
_body_params = completion_stream_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/llm/completions-stream", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - model_endpoint_name: StrictStr, - completion_sync_v1_request: CompletionSyncV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CompletionSyncV1Response: - """Create Completion Sync Task - - Runs a sync prompt completion on an LLM. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param completion_sync_v1_request: (required) - :type completion_sync_v1_request: CompletionSyncV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_completion_sync_task_v1_llm_completions_sync_post_serialize( - model_endpoint_name=model_endpoint_name, - completion_sync_v1_request=completion_sync_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CompletionSyncV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_completion_sync_task_v1_llm_completions_sync_post_with_http_info( - self, - model_endpoint_name: StrictStr, - completion_sync_v1_request: CompletionSyncV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, 
Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CompletionSyncV1Response]: - """Create Completion Sync Task - - Runs a sync prompt completion on an LLM. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param completion_sync_v1_request: (required) - :type completion_sync_v1_request: CompletionSyncV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_completion_sync_task_v1_llm_completions_sync_post_serialize( - model_endpoint_name=model_endpoint_name, - completion_sync_v1_request=completion_sync_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CompletionSyncV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_completion_sync_task_v1_llm_completions_sync_post_without_preload_content( - self, - model_endpoint_name: StrictStr, - completion_sync_v1_request: CompletionSyncV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Completion Sync Task - - Runs a sync prompt completion on an LLM. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param completion_sync_v1_request: (required) - :type completion_sync_v1_request: CompletionSyncV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_completion_sync_task_v1_llm_completions_sync_post_serialize( - model_endpoint_name=model_endpoint_name, - completion_sync_v1_request=completion_sync_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CompletionSyncV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_completion_sync_task_v1_llm_completions_sync_post_serialize( - self, - model_endpoint_name, - completion_sync_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model_endpoint_name is not None: - _query_params.append(("model_endpoint_name", model_endpoint_name)) - - # process the header parameters - # process the form parameters - # process the body parameter - if completion_sync_v1_request is not None: - _body_params = 
completion_sync_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/llm/completions-sync", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - create_docker_image_batch_job_bundle_v1_request: CreateDockerImageBatchJobBundleV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateDockerImageBatchJobBundleV1Response: - """Create Docker Image Batch Job Bundle - - Creates a docker iamge batch job bundle - - :param create_docker_image_batch_job_bundle_v1_request: (required) - :type create_docker_image_batch_job_bundle_v1_request: CreateDockerImageBatchJobBundleV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_serialize( - create_docker_image_batch_job_bundle_v1_request=create_docker_image_batch_job_bundle_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateDockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_with_http_info( - self, - create_docker_image_batch_job_bundle_v1_request: CreateDockerImageBatchJobBundleV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: 
Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateDockerImageBatchJobBundleV1Response]: - """Create Docker Image Batch Job Bundle - - Creates a docker iamge batch job bundle - - :param create_docker_image_batch_job_bundle_v1_request: (required) - :type create_docker_image_batch_job_bundle_v1_request: CreateDockerImageBatchJobBundleV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_serialize( - create_docker_image_batch_job_bundle_v1_request=create_docker_image_batch_job_bundle_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateDockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_without_preload_content( - self, - create_docker_image_batch_job_bundle_v1_request: CreateDockerImageBatchJobBundleV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Docker Image Batch Job Bundle - - Creates a docker iamge batch job bundle - - :param create_docker_image_batch_job_bundle_v1_request: (required) - :type create_docker_image_batch_job_bundle_v1_request: CreateDockerImageBatchJobBundleV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_serialize( - create_docker_image_batch_job_bundle_v1_request=create_docker_image_batch_job_bundle_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateDockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_serialize( - self, - create_docker_image_batch_job_bundle_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - 
# process the header parameters - # process the form parameters - # process the body parameter - if create_docker_image_batch_job_bundle_v1_request is not None: - _body_params = create_docker_image_batch_job_bundle_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/docker-image-batch-job-bundles", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - create_docker_image_batch_job_v1_request: CreateDockerImageBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateDockerImageBatchJobV1Response: - """Create Docker Image Batch Job - - - :param create_docker_image_batch_job_v1_request: (required) - :type create_docker_image_batch_job_v1_request: CreateDockerImageBatchJobV1Request - :param _request_timeout: timeout setting for this request. 
If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_docker_image_batch_job_v1_docker_image_batch_jobs_post_serialize( - create_docker_image_batch_job_v1_request=create_docker_image_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateDockerImageBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post_with_http_info( - self, - create_docker_image_batch_job_v1_request: CreateDockerImageBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - 
_content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateDockerImageBatchJobV1Response]: - """Create Docker Image Batch Job - - - :param create_docker_image_batch_job_v1_request: (required) - :type create_docker_image_batch_job_v1_request: CreateDockerImageBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_docker_image_batch_job_v1_docker_image_batch_jobs_post_serialize( - create_docker_image_batch_job_v1_request=create_docker_image_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateDockerImageBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post_without_preload_content( - self, - create_docker_image_batch_job_v1_request: CreateDockerImageBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Docker Image Batch Job - - - :param create_docker_image_batch_job_v1_request: (required) - :type create_docker_image_batch_job_v1_request: CreateDockerImageBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_docker_image_batch_job_v1_docker_image_batch_jobs_post_serialize( - create_docker_image_batch_job_v1_request=create_docker_image_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateDockerImageBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_serialize( - self, - create_docker_image_batch_job_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if create_docker_image_batch_job_v1_request is not None: - _body_params = create_docker_image_batch_job_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header 
`Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/docker-image-batch-jobs", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_fine_tune_v1_llm_fine_tunes_post( - self, - create_fine_tune_request: CreateFineTuneRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateFineTuneResponse: - """Create Fine Tune - - - :param create_fine_tune_request: (required) - :type create_fine_tune_request: CreateFineTuneRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_fine_tune_v1_llm_fine_tunes_post_serialize( - create_fine_tune_request=create_fine_tune_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateFineTuneResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_fine_tune_v1_llm_fine_tunes_post_with_http_info( - self, - create_fine_tune_request: CreateFineTuneRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateFineTuneResponse]: - """Create Fine Tune - - - :param create_fine_tune_request: (required) - :type create_fine_tune_request: CreateFineTuneRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_fine_tune_v1_llm_fine_tunes_post_serialize( - create_fine_tune_request=create_fine_tune_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateFineTuneResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_fine_tune_v1_llm_fine_tunes_post_without_preload_content( - self, - create_fine_tune_request: CreateFineTuneRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Fine Tune - - - :param create_fine_tune_request: 
(required) - :type create_fine_tune_request: CreateFineTuneRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_fine_tune_v1_llm_fine_tunes_post_serialize( - create_fine_tune_request=create_fine_tune_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateFineTuneResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_fine_tune_v1_llm_fine_tunes_post_serialize( - self, - create_fine_tune_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if create_fine_tune_request is not None: - _body_params = create_fine_tune_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/llm/fine-tunes", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, 
- files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_model_bundle_v1_model_bundles_post( - self, - create_model_bundle_v1_request: CreateModelBundleV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateModelBundleV1Response: - """Create Model Bundle - - Creates a ModelBundle for the current user. - - :param create_model_bundle_v1_request: (required) - :type create_model_bundle_v1_request: CreateModelBundleV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_model_bundle_v1_model_bundles_post_serialize( - create_model_bundle_v1_request=create_model_bundle_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_model_bundle_v1_model_bundles_post_with_http_info( - self, - create_model_bundle_v1_request: CreateModelBundleV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateModelBundleV1Response]: - """Create Model Bundle - - Creates a ModelBundle for the current user. - - :param create_model_bundle_v1_request: (required) - :type create_model_bundle_v1_request: CreateModelBundleV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_model_bundle_v1_model_bundles_post_serialize( - create_model_bundle_v1_request=create_model_bundle_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_model_bundle_v1_model_bundles_post_without_preload_content( - self, - create_model_bundle_v1_request: CreateModelBundleV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Model Bundle - - Creates a ModelBundle for the current user. - - :param create_model_bundle_v1_request: (required) - :type create_model_bundle_v1_request: CreateModelBundleV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_model_bundle_v1_model_bundles_post_serialize( - create_model_bundle_v1_request=create_model_bundle_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_model_bundle_v1_model_bundles_post_serialize( - self, - create_model_bundle_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # 
process the body parameter - if create_model_bundle_v1_request is not None: - _body_params = create_model_bundle_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/model-bundles", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_model_bundle_v2_model_bundles_post( - self, - create_model_bundle_v2_request: CreateModelBundleV2Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateModelBundleV2Response: - """Create Model Bundle - - Creates a ModelBundle for the current user. - - :param create_model_bundle_v2_request: (required) - :type create_model_bundle_v2_request: CreateModelBundleV2Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_model_bundle_v2_model_bundles_post_serialize( - create_model_bundle_v2_request=create_model_bundle_v2_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_model_bundle_v2_model_bundles_post_with_http_info( - self, - create_model_bundle_v2_request: CreateModelBundleV2Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateModelBundleV2Response]: - """Create Model 
Bundle - - Creates a ModelBundle for the current user. - - :param create_model_bundle_v2_request: (required) - :type create_model_bundle_v2_request: CreateModelBundleV2Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_model_bundle_v2_model_bundles_post_serialize( - create_model_bundle_v2_request=create_model_bundle_v2_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_model_bundle_v2_model_bundles_post_without_preload_content( - self, - create_model_bundle_v2_request: CreateModelBundleV2Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Model Bundle - - Creates a ModelBundle for the current user. - - :param create_model_bundle_v2_request: (required) - :type create_model_bundle_v2_request: CreateModelBundleV2Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_model_bundle_v2_model_bundles_post_serialize( - create_model_bundle_v2_request=create_model_bundle_v2_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_model_bundle_v2_model_bundles_post_serialize( - self, - create_model_bundle_v2_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if create_model_bundle_v2_request is not None: - _body_params = create_model_bundle_v2_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - 
_default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v2/model-bundles", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_model_endpoint_v1_llm_model_endpoints_post( - self, - create_llm_model_endpoint_v1_request: CreateLLMModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateLLMModelEndpointV1Response: - """Create Model Endpoint - - Creates an LLM endpoint for the current user. - - :param create_llm_model_endpoint_v1_request: (required) - :type create_llm_model_endpoint_v1_request: CreateLLMModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_model_endpoint_v1_llm_model_endpoints_post_serialize( - create_llm_model_endpoint_v1_request=create_llm_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateLLMModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_model_endpoint_v1_llm_model_endpoints_post_with_http_info( - self, - create_llm_model_endpoint_v1_request: CreateLLMModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateLLMModelEndpointV1Response]: - """Create Model Endpoint - - Creates an LLM endpoint for the current user. - - :param create_llm_model_endpoint_v1_request: (required) - :type create_llm_model_endpoint_v1_request: CreateLLMModelEndpointV1Request - :param _request_timeout: timeout setting for this request. 
If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_model_endpoint_v1_llm_model_endpoints_post_serialize( - create_llm_model_endpoint_v1_request=create_llm_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateLLMModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_model_endpoint_v1_llm_model_endpoints_post_without_preload_content( - self, - create_llm_model_endpoint_v1_request: CreateLLMModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - 
_headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Model Endpoint - - Creates an LLM endpoint for the current user. - - :param create_llm_model_endpoint_v1_request: (required) - :type create_llm_model_endpoint_v1_request: CreateLLMModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_model_endpoint_v1_llm_model_endpoints_post_serialize( - create_llm_model_endpoint_v1_request=create_llm_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateLLMModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_model_endpoint_v1_llm_model_endpoints_post_serialize( - self, - create_llm_model_endpoint_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if create_llm_model_endpoint_v1_request is not None: - _body_params = create_llm_model_endpoint_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/llm/model-endpoints", - path_params=_path_params, - 
query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_model_endpoint_v1_model_endpoints_post( - self, - create_model_endpoint_v1_request: CreateModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateModelEndpointV1Response: - """Create Model Endpoint - - Creates a Model for the current user. - - :param create_model_endpoint_v1_request: (required) - :type create_model_endpoint_v1_request: CreateModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_model_endpoint_v1_model_endpoints_post_serialize( - create_model_endpoint_v1_request=create_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_model_endpoint_v1_model_endpoints_post_with_http_info( - self, - create_model_endpoint_v1_request: CreateModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateModelEndpointV1Response]: - """Create Model Endpoint - - Creates a Model for the current user. - - :param create_model_endpoint_v1_request: (required) - :type create_model_endpoint_v1_request: CreateModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_model_endpoint_v1_model_endpoints_post_serialize( - create_model_endpoint_v1_request=create_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_model_endpoint_v1_model_endpoints_post_without_preload_content( - self, - create_model_endpoint_v1_request: CreateModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Model Endpoint - - Creates a Model for the current user. - - :param create_model_endpoint_v1_request: (required) - :type create_model_endpoint_v1_request: CreateModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_model_endpoint_v1_model_endpoints_post_serialize( - create_model_endpoint_v1_request=create_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_model_endpoint_v1_model_endpoints_post_serialize( - self, - create_model_endpoint_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form 
parameters - # process the body parameter - if create_model_endpoint_v1_request is not None: - _body_params = create_model_endpoint_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/model-endpoints", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - model_endpoint_id: StrictStr, - sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> object: - """Create Streaming Inference Task - - Runs a streaming inference prediction. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param sync_endpoint_predict_v1_request: (required) - :type sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request - :param _request_timeout: timeout setting for this request. 
If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_streaming_inference_task_v1_streaming_tasks_post_serialize( - model_endpoint_id=model_endpoint_id, - sync_endpoint_predict_v1_request=sync_endpoint_predict_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_streaming_inference_task_v1_streaming_tasks_post_with_http_info( - self, - model_endpoint_id: StrictStr, - sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - 
_content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[object]: - """Create Streaming Inference Task - - Runs a streaming inference prediction. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param sync_endpoint_predict_v1_request: (required) - :type sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_streaming_inference_task_v1_streaming_tasks_post_serialize( - model_endpoint_id=model_endpoint_id, - sync_endpoint_predict_v1_request=sync_endpoint_predict_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_streaming_inference_task_v1_streaming_tasks_post_without_preload_content( - self, - model_endpoint_id: StrictStr, - sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Streaming Inference Task - - Runs a streaming inference prediction. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param sync_endpoint_predict_v1_request: (required) - :type sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_streaming_inference_task_v1_streaming_tasks_post_serialize( - model_endpoint_id=model_endpoint_id, - sync_endpoint_predict_v1_request=sync_endpoint_predict_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_streaming_inference_task_v1_streaming_tasks_post_serialize( - self, - model_endpoint_id, - sync_endpoint_predict_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model_endpoint_id is not None: - _query_params.append(("model_endpoint_id", model_endpoint_id)) - - # process the header parameters - # process the form parameters - # process the body parameter - if sync_endpoint_predict_v1_request is not None: - _body_params = 
sync_endpoint_predict_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/streaming-tasks", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_sync_inference_task_v1_sync_tasks_post( - self, - model_endpoint_id: StrictStr, - sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> SyncEndpointPredictV1Response: - """Create Sync Inference Task - - Runs a sync inference prediction. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param sync_endpoint_predict_v1_request: (required) - :type sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_sync_inference_task_v1_sync_tasks_post_serialize( - model_endpoint_id=model_endpoint_id, - sync_endpoint_predict_v1_request=sync_endpoint_predict_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "SyncEndpointPredictV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_sync_inference_task_v1_sync_tasks_post_with_http_info( - self, - model_endpoint_id: StrictStr, - sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, 
Field(ge=0, le=0)] = 0, - ) -> ApiResponse[SyncEndpointPredictV1Response]: - """Create Sync Inference Task - - Runs a sync inference prediction. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param sync_endpoint_predict_v1_request: (required) - :type sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_sync_inference_task_v1_sync_tasks_post_serialize( - model_endpoint_id=model_endpoint_id, - sync_endpoint_predict_v1_request=sync_endpoint_predict_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "SyncEndpointPredictV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_sync_inference_task_v1_sync_tasks_post_without_preload_content( - self, - model_endpoint_id: StrictStr, - sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Sync Inference Task - - Runs a sync inference prediction. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param sync_endpoint_predict_v1_request: (required) - :type sync_endpoint_predict_v1_request: SyncEndpointPredictV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_sync_inference_task_v1_sync_tasks_post_serialize( - model_endpoint_id=model_endpoint_id, - sync_endpoint_predict_v1_request=sync_endpoint_predict_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "SyncEndpointPredictV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_sync_inference_task_v1_sync_tasks_post_serialize( - self, - model_endpoint_id, - sync_endpoint_predict_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model_endpoint_id is not None: - _query_params.append(("model_endpoint_id", model_endpoint_id)) - - # process the header parameters - # process the form parameters - # process the body parameter - if sync_endpoint_predict_v1_request is not None: - _body_params = 
sync_endpoint_predict_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/sync-tasks", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def create_trigger_v1_triggers_post( - self, - create_trigger_v1_request: CreateTriggerV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CreateTriggerV1Response: - """Create Trigger - - Creates and runs a trigger - - :param create_trigger_v1_request: (required) - :type create_trigger_v1_request: CreateTriggerV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_trigger_v1_triggers_post_serialize( - create_trigger_v1_request=create_trigger_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def create_trigger_v1_triggers_post_with_http_info( - self, - create_trigger_v1_request: CreateTriggerV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CreateTriggerV1Response]: - """Create Trigger - - Creates and runs a trigger - - :param 
create_trigger_v1_request: (required) - :type create_trigger_v1_request: CreateTriggerV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_trigger_v1_triggers_post_serialize( - create_trigger_v1_request=create_trigger_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def create_trigger_v1_triggers_post_without_preload_content( - self, - create_trigger_v1_request: CreateTriggerV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create Trigger - - Creates and runs a trigger - - :param create_trigger_v1_request: (required) - :type create_trigger_v1_request: CreateTriggerV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_trigger_v1_triggers_post_serialize( - create_trigger_v1_request=create_trigger_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CreateTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _create_trigger_v1_triggers_post_serialize( - self, - create_trigger_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if create_trigger_v1_request is not None: - _body_params = create_trigger_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = 
self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/triggers", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def delete_file_v1_files_file_id_delete( - self, - file_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> DeleteFileResponse: - """Delete File - - - :param file_id: (required) - :type file_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_file_v1_files_file_id_delete_serialize( - file_id=file_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteFileResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def delete_file_v1_files_file_id_delete_with_http_info( - self, - file_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[DeleteFileResponse]: - """Delete File - - - :param file_id: (required) - :type file_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_file_v1_files_file_id_delete_serialize( - file_id=file_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteFileResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def delete_file_v1_files_file_id_delete_without_preload_content( - self, - file_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Delete File - - - :param file_id: (required) - :type file_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_file_v1_files_file_id_delete_serialize( - file_id=file_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteFileResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _delete_file_v1_files_file_id_delete_serialize( - self, - file_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if file_id is not None: - _path_params["file_id"] = file_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = 
self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="DELETE", - resource_path="/v1/files/{file_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( - self, - model_endpoint_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> DeleteLLMEndpointResponse: - """Delete Llm Model Endpoint - - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_serialize( - model_endpoint_name=model_endpoint_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteLLMEndpointResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_with_http_info( - self, - model_endpoint_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[DeleteLLMEndpointResponse]: - """Delete Llm Model Endpoint - - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_serialize( - model_endpoint_name=model_endpoint_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteLLMEndpointResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_without_preload_content( - self, - model_endpoint_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Delete Llm Model 
Endpoint - - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_serialize( - model_endpoint_name=model_endpoint_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteLLMEndpointResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_serialize( - self, - model_endpoint_name, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if model_endpoint_name is not None: - _path_params["model_endpoint_name"] = model_endpoint_name - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="DELETE", - resource_path="/v1/llm/model-endpoints/{model_endpoint_name}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def 
delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( - self, - model_endpoint_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> DeleteModelEndpointV1Response: - """Delete Model Endpoint - - Lists the Models owned by the current owner. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_serialize( - model_endpoint_id=model_endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_with_http_info( - self, - model_endpoint_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[DeleteModelEndpointV1Response]: - """Delete Model Endpoint - - Lists the Models owned by the current owner. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_serialize( - model_endpoint_id=model_endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_without_preload_content( - self, - model_endpoint_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Delete Model Endpoint - - Lists the Models owned by the current owner. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_serialize( - model_endpoint_id=model_endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_serialize( - self, - model_endpoint_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if model_endpoint_id is not None: - _path_params["model_endpoint_id"] = model_endpoint_id - # process the query parameters - # process the header 
parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="DELETE", - resource_path="/v1/model-endpoints/{model_endpoint_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def delete_trigger_v1_triggers_trigger_id_delete( - self, - trigger_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> DeleteTriggerV1Response: - """Delete Trigger - - Deletes the trigger with the given ID - - :param trigger_id: (required) - :type trigger_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_trigger_v1_triggers_trigger_id_delete_serialize( - trigger_id=trigger_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def delete_trigger_v1_triggers_trigger_id_delete_with_http_info( - self, - trigger_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[DeleteTriggerV1Response]: - """Delete Trigger - - Deletes the trigger with the given ID - - :param trigger_id: (required) - :type trigger_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_trigger_v1_triggers_trigger_id_delete_serialize( - trigger_id=trigger_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def delete_trigger_v1_triggers_trigger_id_delete_without_preload_content( - self, - trigger_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Delete Trigger - - Deletes the trigger with the given ID - - :param trigger_id: (required) - :type trigger_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_trigger_v1_triggers_trigger_id_delete_serialize( - trigger_id=trigger_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DeleteTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _delete_trigger_v1_triggers_trigger_id_delete_serialize( - self, - trigger_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if trigger_id is not None: - _path_params["trigger_id"] = trigger_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP 
header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="DELETE", - resource_path="/v1/triggers/{trigger_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - model_download_request: ModelDownloadRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ModelDownloadResponse: - """Download Model Endpoint - - - :param model_download_request: (required) - :type model_download_request: ModelDownloadRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._download_model_endpoint_v1_llm_model_endpoints_download_post_serialize( - model_download_request=model_download_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelDownloadResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def download_model_endpoint_v1_llm_model_endpoints_download_post_with_http_info( - self, - model_download_request: ModelDownloadRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ModelDownloadResponse]: - """Download Model Endpoint - - - :param model_download_request: (required) - :type model_download_request: ModelDownloadRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._download_model_endpoint_v1_llm_model_endpoints_download_post_serialize( - model_download_request=model_download_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelDownloadResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def download_model_endpoint_v1_llm_model_endpoints_download_post_without_preload_content( - self, - model_download_request: ModelDownloadRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Download Model Endpoint - - - 
:param model_download_request: (required) - :type model_download_request: ModelDownloadRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._download_model_endpoint_v1_llm_model_endpoints_download_post_serialize( - model_download_request=model_download_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelDownloadResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _download_model_endpoint_v1_llm_model_endpoints_download_post_serialize( - self, - model_download_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if model_download_request is not None: - _body_params = model_download_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/llm/model-endpoints/download", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - 
body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_async_inference_task_v1_async_tasks_task_id_get( - self, - task_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetAsyncTaskV1Response: - """Get Async Inference Task - - Gets the status of an async inference task. - - :param task_id: (required) - :type task_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_async_inference_task_v1_async_tasks_task_id_get_serialize( - task_id=task_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetAsyncTaskV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_async_inference_task_v1_async_tasks_task_id_get_with_http_info( - self, - task_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetAsyncTaskV1Response]: - """Get Async Inference Task - - Gets the status of an async inference task. - - :param task_id: (required) - :type task_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_async_inference_task_v1_async_tasks_task_id_get_serialize( - task_id=task_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetAsyncTaskV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_async_inference_task_v1_async_tasks_task_id_get_without_preload_content( - self, - task_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Async Inference Task - - Gets the status of an async inference task. - - :param task_id: (required) - :type task_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_async_inference_task_v1_async_tasks_task_id_get_serialize( - task_id=task_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetAsyncTaskV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_async_inference_task_v1_async_tasks_task_id_get_serialize( - self, - task_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if task_id is not None: - _path_params["task_id"] = task_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - 
method="GET", - resource_path="/v1/async-tasks/{task_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_batch_job_v1_batch_jobs_batch_job_id_get( - self, - batch_job_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetBatchJobV1Response: - """Get Batch Job - - Gets a batch job. - - :param batch_job_id: (required) - :type batch_job_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_batch_job_v1_batch_jobs_batch_job_id_get_serialize( - batch_job_id=batch_job_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_batch_job_v1_batch_jobs_batch_job_id_get_with_http_info( - self, - batch_job_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetBatchJobV1Response]: - """Get Batch Job - - Gets a batch job. - - :param batch_job_id: (required) - :type batch_job_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_batch_job_v1_batch_jobs_batch_job_id_get_serialize( - batch_job_id=batch_job_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_batch_job_v1_batch_jobs_batch_job_id_get_without_preload_content( - self, - batch_job_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Batch Job - - Gets a batch job. - - :param batch_job_id: (required) - :type batch_job_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_batch_job_v1_batch_jobs_batch_job_id_get_serialize( - batch_job_id=batch_job_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_batch_job_v1_batch_jobs_batch_job_id_get_serialize( - self, - batch_job_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if batch_job_id is not None: - _path_params["batch_job_id"] = batch_job_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/batch-jobs/{batch_job_id}", - path_params=_path_params, - 
query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( - self, - docker_image_batch_job_bundle_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> DockerImageBatchJobBundleV1Response: - """Get Docker Image Batch Job Model Bundle - - Get details for a given DockerImageBatchJobBundle owned by the current owner - - :param docker_image_batch_job_bundle_id: (required) - :type docker_image_batch_job_bundle_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_serialize( - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_with_http_info( - self, - docker_image_batch_job_bundle_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[DockerImageBatchJobBundleV1Response]: - """Get Docker Image Batch Job Model Bundle - - Get details for a given DockerImageBatchJobBundle owned by the current owner - - :param docker_image_batch_job_bundle_id: (required) - :type docker_image_batch_job_bundle_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_serialize( - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_without_preload_content( - self, - docker_image_batch_job_bundle_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, 
Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Docker Image Batch Job Model Bundle - - Get details for a given DockerImageBatchJobBundle owned by the current owner - - :param docker_image_batch_job_bundle_id: (required) - :type docker_image_batch_job_bundle_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_serialize( - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_serialize( - self, - docker_image_batch_job_bundle_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if docker_image_batch_job_bundle_id is not None: - _path_params["docker_image_batch_job_bundle_id"] = docker_image_batch_job_bundle_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/docker-image-batch-job-bundles/{docker_image_batch_job_bundle_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - 
auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( - self, - batch_job_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetDockerImageBatchJobV1Response: - """Get Docker Image Batch Job - - - :param batch_job_id: (required) - :type batch_job_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_serialize( - batch_job_id=batch_job_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetDockerImageBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_with_http_info( - self, - batch_job_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetDockerImageBatchJobV1Response]: - """Get Docker Image Batch Job - - - :param batch_job_id: (required) - :type batch_job_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_serialize( - batch_job_id=batch_job_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetDockerImageBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_without_preload_content( - self, - batch_job_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Docker Image Batch Job - - - :param batch_job_id: (required) - :type batch_job_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_serialize( - batch_job_id=batch_job_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetDockerImageBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_serialize( - self, - batch_job_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if batch_job_id is not None: - _path_params["batch_job_id"] = batch_job_id - # process the query parameters - # process the header parameters - # process 
the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/docker-image-batch-jobs/{batch_job_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_file_content_v1_files_file_id_content_get( - self, - file_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetFileContentResponse: - """Get File Content - - Describe the LLM Model endpoint with given name. - - :param file_id: (required) - :type file_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_file_content_v1_files_file_id_content_get_serialize( - file_id=file_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFileContentResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_file_content_v1_files_file_id_content_get_with_http_info( - self, - file_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetFileContentResponse]: - """Get File Content - - Describe the LLM Model endpoint with given name. - - :param file_id: (required) - :type file_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_file_content_v1_files_file_id_content_get_serialize( - file_id=file_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFileContentResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_file_content_v1_files_file_id_content_get_without_preload_content( - self, - file_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get File Content - - Describe the LLM Model endpoint with given name. - - :param file_id: (required) - :type file_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_file_content_v1_files_file_id_content_get_serialize( - file_id=file_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFileContentResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_file_content_v1_files_file_id_content_get_serialize( - self, - file_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if file_id is not None: - _path_params["file_id"] = file_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - 
_header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/files/{file_id}/content", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_file_v1_files_file_id_get( - self, - file_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetFileResponse: - """Get File - - - :param file_id: (required) - :type file_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_file_v1_files_file_id_get_serialize( - file_id=file_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFileResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_file_v1_files_file_id_get_with_http_info( - self, - file_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetFileResponse]: - """Get File - - - :param file_id: (required) - :type file_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_file_v1_files_file_id_get_serialize( - file_id=file_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFileResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_file_v1_files_file_id_get_without_preload_content( - self, - file_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get File - - - :param file_id: (required) - :type file_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_file_v1_files_file_id_get_serialize( - file_id=file_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFileResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_file_v1_files_file_id_get_serialize( - self, - file_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if file_id is not None: - _path_params["file_id"] = file_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/files/{file_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - 
body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( - self, - fine_tune_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetFineTuneEventsResponse: - """Get Fine Tune Events - - - :param fine_tune_id: (required) - :type fine_tune_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_serialize( - fine_tune_id=fine_tune_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFineTuneEventsResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_with_http_info( - self, - fine_tune_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetFineTuneEventsResponse]: - """Get Fine Tune Events - - - :param fine_tune_id: (required) - :type fine_tune_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_serialize( - fine_tune_id=fine_tune_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFineTuneEventsResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_without_preload_content( - self, - fine_tune_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Fine Tune Events - - - :param fine_tune_id: (required) - :type fine_tune_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_serialize( - fine_tune_id=fine_tune_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFineTuneEventsResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_serialize( - self, - fine_tune_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if fine_tune_id is not None: - _path_params["fine_tune_id"] = fine_tune_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] 
- - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/llm/fine-tunes/{fine_tune_id}/events", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( - self, - fine_tune_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetFineTuneResponse: - """Get Fine Tune - - - :param fine_tune_id: (required) - :type fine_tune_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_serialize( - fine_tune_id=fine_tune_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFineTuneResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_with_http_info( - self, - fine_tune_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetFineTuneResponse]: - """Get Fine Tune - - - :param fine_tune_id: (required) - :type fine_tune_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_serialize( - fine_tune_id=fine_tune_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFineTuneResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_without_preload_content( - self, - fine_tune_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Fine Tune - - - :param fine_tune_id: (required) - :type fine_tune_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_serialize( - fine_tune_id=fine_tune_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetFineTuneResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_serialize( - self, - fine_tune_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if fine_tune_id is not None: - _path_params["fine_tune_id"] = fine_tune_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/llm/fine-tunes/{fine_tune_id}", - 
path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( - self, - bundle_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> DockerImageBatchJobBundleV1Response: - """Get Latest Docker Image Batch Job Bundle - - Gets latest Docker Image Batch Job Bundle with given name owned by the current owner - - :param bundle_name: (required) - :type bundle_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_serialize( - bundle_name=bundle_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_with_http_info( - self, - bundle_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[DockerImageBatchJobBundleV1Response]: - """Get Latest Docker Image Batch Job Bundle - - Gets latest Docker Image Batch Job Bundle with given name owned by the current owner - - :param bundle_name: (required) - :type bundle_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_serialize( - bundle_name=bundle_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_without_preload_content( - self, - bundle_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Latest Docker Image Batch Job Bundle - - Gets latest Docker Image Batch Job Bundle with given name owned by the current owner - - :param bundle_name: (required) - :type bundle_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_serialize( - bundle_name=bundle_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_serialize( - self, - bundle_name, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if bundle_name is not None: - 
_query_params.append(("bundle_name", bundle_name)) - - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/docker-image-batch-job-bundles/latest", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_latest_model_bundle_v1_model_bundles_latest_get( - self, - model_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ModelBundleV1Response: - """Get Latest Model Bundle - - Gets the latest Model Bundle with the given name owned by the current owner. - - :param model_name: (required) - :type model_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_latest_model_bundle_v1_model_bundles_latest_get_serialize( - model_name=model_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_latest_model_bundle_v1_model_bundles_latest_get_with_http_info( - self, - model_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ModelBundleV1Response]: - """Get Latest Model Bundle - - Gets the latest Model Bundle with the given name owned by the current owner. - - :param model_name: (required) - :type model_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_latest_model_bundle_v1_model_bundles_latest_get_serialize( - model_name=model_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_latest_model_bundle_v1_model_bundles_latest_get_without_preload_content( - self, - model_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Latest Model Bundle - - Gets the latest Model Bundle with the given name owned by the 
current owner. - - :param model_name: (required) - :type model_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_latest_model_bundle_v1_model_bundles_latest_get_serialize( - model_name=model_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_latest_model_bundle_v1_model_bundles_latest_get_serialize( - self, - model_name, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model_name is not None: - _query_params.append(("model_name", model_name)) - - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/model-bundles/latest", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_latest_model_bundle_v2_model_bundles_latest_get( - self, - model_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, 
Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ModelBundleV2Response: - """Get Latest Model Bundle - - Gets the latest Model Bundle with the given name owned by the current owner. - - :param model_name: (required) - :type model_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_latest_model_bundle_v2_model_bundles_latest_get_serialize( - model_name=model_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_latest_model_bundle_v2_model_bundles_latest_get_with_http_info( - self, - model_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ModelBundleV2Response]: - """Get Latest Model Bundle - - Gets the latest Model Bundle with the given name owned by the current owner. - - :param model_name: (required) - :type model_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_latest_model_bundle_v2_model_bundles_latest_get_serialize( - model_name=model_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_latest_model_bundle_v2_model_bundles_latest_get_without_preload_content( - self, - model_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Latest Model Bundle - - Gets the latest Model Bundle with the given name owned by the current owner. - - :param model_name: (required) - :type model_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_latest_model_bundle_v2_model_bundles_latest_get_serialize( - model_name=model_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_latest_model_bundle_v2_model_bundles_latest_get_serialize( - self, - model_name, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model_name is not None: - _query_params.append(("model_name", model_name)) - - # process the header parameters - # process the form parameters - # process the body parameter - 
- # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v2/model-bundles/latest", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_model_bundle_v1_model_bundles_model_bundle_id_get( - self, - model_bundle_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ModelBundleV1Response: - """Get Model Bundle - - Gets the details for a given ModelBundle owned by the current owner. - - :param model_bundle_id: (required) - :type model_bundle_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_bundle_v1_model_bundles_model_bundle_id_get_serialize( - model_bundle_id=model_bundle_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_model_bundle_v1_model_bundles_model_bundle_id_get_with_http_info( - self, - model_bundle_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ModelBundleV1Response]: - """Get Model Bundle - - Gets the details for a given ModelBundle owned by the current owner. - - :param model_bundle_id: (required) - :type model_bundle_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_bundle_v1_model_bundles_model_bundle_id_get_serialize( - model_bundle_id=model_bundle_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_model_bundle_v1_model_bundles_model_bundle_id_get_without_preload_content( - self, - model_bundle_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Model Bundle - - Gets the details for a given ModelBundle owned by the 
current owner. - - :param model_bundle_id: (required) - :type model_bundle_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_model_bundle_v1_model_bundles_model_bundle_id_get_serialize( - model_bundle_id=model_bundle_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_model_bundle_v1_model_bundles_model_bundle_id_get_serialize( - self, - model_bundle_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if model_bundle_id is not None: - _path_params["model_bundle_id"] = model_bundle_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/model-bundles/{model_bundle_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_model_bundle_v2_model_bundles_model_bundle_id_get( - self, - model_bundle_id: StrictStr, - _request_timeout: Union[ - 
None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ModelBundleV2Response: - """Get Model Bundle - - Gets the details for a given ModelBundle owned by the current owner. - - :param model_bundle_id: (required) - :type model_bundle_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_model_bundle_v2_model_bundles_model_bundle_id_get_serialize( - model_bundle_id=model_bundle_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_model_bundle_v2_model_bundles_model_bundle_id_get_with_http_info( - self, - model_bundle_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ModelBundleV2Response]: - """Get Model Bundle - - Gets the details for a given ModelBundle owned by the current owner. - - :param model_bundle_id: (required) - :type model_bundle_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_bundle_v2_model_bundles_model_bundle_id_get_serialize( - model_bundle_id=model_bundle_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_model_bundle_v2_model_bundles_model_bundle_id_get_without_preload_content( - self, - model_bundle_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Model Bundle - - Gets the details for a given ModelBundle owned by the current owner. - - :param model_bundle_id: (required) - :type model_bundle_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_bundle_v2_model_bundles_model_bundle_id_get_serialize( - model_bundle_id=model_bundle_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ModelBundleV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_model_bundle_v2_model_bundles_model_bundle_id_get_serialize( - self, - model_bundle_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if model_bundle_id is not None: - _path_params["model_bundle_id"] = model_bundle_id - # process the query parameters - # process the header parameters - # process the form parameters - # 
process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v2/model-bundles/{model_bundle_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( - self, - model_endpoint_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetLLMModelEndpointV1Response: - """Get Model Endpoint - - Describe the LLM Model endpoint with given name. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_serialize( - model_endpoint_name=model_endpoint_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetLLMModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_with_http_info( - self, - model_endpoint_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetLLMModelEndpointV1Response]: - """Get Model Endpoint - - Describe the LLM Model endpoint with given name. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_serialize( - model_endpoint_name=model_endpoint_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetLLMModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_without_preload_content( - self, - model_endpoint_name: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 
0, - ) -> RESTResponseType: - """Get Model Endpoint - - Describe the LLM Model endpoint with given name. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_serialize( - model_endpoint_name=model_endpoint_name, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetLLMModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_serialize( - self, - model_endpoint_name, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if model_endpoint_name is not None: - _path_params["model_endpoint_name"] = model_endpoint_name - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/llm/model-endpoints/{model_endpoint_name}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def 
get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( - self, - model_endpoint_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetModelEndpointV1Response: - """Get Model Endpoint - - Describe the Model endpoint with given ID. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_serialize( - model_endpoint_id=model_endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_with_http_info( - self, - model_endpoint_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetModelEndpointV1Response]: - """Get Model Endpoint - - Describe the Model endpoint with given ID. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_serialize( - model_endpoint_id=model_endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_without_preload_content( - self, - model_endpoint_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Model Endpoint - - Describe the Model endpoint with given ID. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_serialize( - model_endpoint_id=model_endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_serialize( - self, - model_endpoint_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if model_endpoint_id is not None: - _path_params["model_endpoint_id"] = model_endpoint_id - # process the query parameters - # process the header parameters - # process 
the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/model-endpoints/{model_endpoint_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_model_endpoints_api_v1_model_endpoints_api_get( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> object: - """Get Model Endpoints Api - - Shows the API of the Model Endpoints owned by the current owner. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_endpoints_api_v1_model_endpoints_api_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_model_endpoints_api_v1_model_endpoints_api_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[object]: - """Get Model Endpoints Api - - Shows the API of the Model Endpoints owned by the current owner. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_endpoints_api_v1_model_endpoints_api_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_model_endpoints_api_v1_model_endpoints_api_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Model Endpoints Api - - Shows the API of the Model Endpoints owned by the current owner. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_endpoints_api_v1_model_endpoints_api_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_model_endpoints_api_v1_model_endpoints_api_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/model-endpoints-api", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - 
body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> object: - """Get Model Endpoints Schema - - Lists the schemas of the Model Endpoints owned by the current owner. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_model_endpoints_schema_v1_model_endpoints_schema_json_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[object]: - """Get Model Endpoints Schema - - Lists the schemas of the Model Endpoints owned by the current owner. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_endpoints_schema_v1_model_endpoints_schema_json_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Model Endpoints Schema - - Lists the schemas of the Model Endpoints owned by the current owner. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_model_endpoints_schema_v1_model_endpoints_schema_json_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/model-endpoints-schema.json", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - 
auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def get_trigger_v1_triggers_trigger_id_get( - self, - trigger_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> GetTriggerV1Response: - """Get Trigger - - Describes the trigger with the given ID - - :param trigger_id: (required) - :type trigger_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_trigger_v1_triggers_trigger_id_get_serialize( - trigger_id=trigger_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def get_trigger_v1_triggers_trigger_id_get_with_http_info( - self, - trigger_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[GetTriggerV1Response]: - """Get Trigger - - Describes the trigger with the given ID - - :param trigger_id: (required) - :type trigger_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_trigger_v1_triggers_trigger_id_get_serialize( - trigger_id=trigger_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def get_trigger_v1_triggers_trigger_id_get_without_preload_content( - self, - trigger_id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get Trigger - - Describes the trigger with the given ID - - :param trigger_id: (required) - :type trigger_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_trigger_v1_triggers_trigger_id_get_serialize( - trigger_id=trigger_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "GetTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _get_trigger_v1_triggers_trigger_id_get_serialize( - self, - trigger_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if trigger_id is not None: - _path_params["trigger_id"] = trigger_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/triggers/{trigger_id}", - path_params=_path_params, - query_params=_query_params, 
- header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def healthcheck_healthcheck_get( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> object: - """Healthcheck - - Returns 200 if the app is healthy. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._healthcheck_healthcheck_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def healthcheck_healthcheck_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[object]: - """Healthcheck - - Returns 200 if the app is healthy. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._healthcheck_healthcheck_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def healthcheck_healthcheck_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Healthcheck - - Returns 200 if the app is healthy. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._healthcheck_healthcheck_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _healthcheck_healthcheck_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = [] - - return self.api_client.param_serialize( - method="GET", - resource_path="/healthcheck", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def healthcheck_healthz_get( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, 
le=0)] = 0, - ) -> object: - """Healthcheck - - Returns 200 if the app is healthy. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._healthcheck_healthz_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def healthcheck_healthz_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[object]: - """Healthcheck - - Returns 200 if the app is healthy. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._healthcheck_healthz_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def healthcheck_healthz_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Healthcheck - - Returns 200 if the app is healthy. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._healthcheck_healthz_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _healthcheck_healthz_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = [] - - return self.api_client.param_serialize( - method="GET", - resource_path="/healthz", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def healthcheck_readyz_get( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - 
) -> object: - """Healthcheck - - Returns 200 if the app is healthy. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._healthcheck_readyz_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def healthcheck_readyz_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[object]: - """Healthcheck - - Returns 200 if the app is healthy. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._healthcheck_readyz_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def healthcheck_readyz_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Healthcheck - - Returns 200 if the app is healthy. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._healthcheck_readyz_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "object", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _healthcheck_readyz_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = [] - - return self.api_client.param_serialize( - method="GET", - resource_path="/readyz", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( - self, - bundle_name: Optional[StrictStr] = None, - order_by: Optional[ModelBundleOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - 
_content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListDockerImageBatchJobBundleV1Response: - """List Docker Image Batch Job Model Bundles - - Lists docker image batch job bundles owned by current owner - - :param bundle_name: - :type bundle_name: str - :param order_by: - :type order_by: ModelBundleOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_serialize( - bundle_name=bundle_name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListDockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_with_http_info( - self, - bundle_name: Optional[StrictStr] = None, - order_by: Optional[ModelBundleOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListDockerImageBatchJobBundleV1Response]: - """List Docker Image Batch Job Model Bundles - - Lists docker image batch job bundles owned by current owner - - :param bundle_name: - :type bundle_name: str - :param order_by: - :type order_by: ModelBundleOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_serialize( - bundle_name=bundle_name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListDockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_without_preload_content( - self, - bundle_name: Optional[StrictStr] = None, - order_by: Optional[ModelBundleOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List Docker Image Batch Job Model Bundles - - Lists docker image batch job bundles owned by current owner - - :param bundle_name: - :type bundle_name: 
str - :param order_by: - :type order_by: ModelBundleOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_serialize( - bundle_name=bundle_name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListDockerImageBatchJobBundleV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_serialize( - self, - bundle_name, - order_by, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if bundle_name is not None: - _query_params.append(("bundle_name", bundle_name)) - - if order_by is not None: - _query_params.append(("order_by", order_by.value)) - - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/docker-image-batch-job-bundles", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - 
_request_auth=_request_auth, - ) - - @validate_call - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( - self, - trigger_id: Optional[StrictStr] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListDockerImageBatchJobsV1Response: - """List Docker Image Batch Jobs - - Lists docker image batch jobs spawned by trigger with given ID - - :param trigger_id: - :type trigger_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_serialize( - trigger_id=trigger_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListDockerImageBatchJobsV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_with_http_info( - self, - trigger_id: Optional[StrictStr] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListDockerImageBatchJobsV1Response]: - """List Docker Image Batch Jobs - - Lists docker image batch jobs spawned by trigger with given ID - - :param trigger_id: - :type trigger_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_serialize( - trigger_id=trigger_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListDockerImageBatchJobsV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_without_preload_content( - self, - trigger_id: Optional[StrictStr] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List Docker Image Batch Jobs - - Lists docker image batch jobs spawned by trigger with given ID - - :param trigger_id: - :type trigger_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_serialize( - trigger_id=trigger_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListDockerImageBatchJobsV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_serialize( - self, - trigger_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if trigger_id is not None: - _query_params.append(("trigger_id", trigger_id)) - - # process the header parameters - # process the form parameters - # 
process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/docker-image-batch-jobs", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def list_files_v1_files_get( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListFilesResponse: - """List Files - - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_files_v1_files_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListFilesResponse", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def list_files_v1_files_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListFilesResponse]: - """List Files - - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_files_v1_files_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListFilesResponse", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def list_files_v1_files_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List Files - - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_files_v1_files_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListFilesResponse", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _list_files_v1_files_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/files", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def list_fine_tunes_v1_llm_fine_tunes_get( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - 
_headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListFineTunesResponse: - """List Fine Tunes - - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_fine_tunes_v1_llm_fine_tunes_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListFineTunesResponse", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def list_fine_tunes_v1_llm_fine_tunes_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListFineTunesResponse]: - """List Fine Tunes - - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_fine_tunes_v1_llm_fine_tunes_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListFineTunesResponse", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def list_fine_tunes_v1_llm_fine_tunes_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List Fine Tunes - - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_fine_tunes_v1_llm_fine_tunes_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListFineTunesResponse", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _list_fine_tunes_v1_llm_fine_tunes_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/llm/fine-tunes", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def list_model_bundles_v1_model_bundles_get( - self, - model_name: Optional[StrictStr] = None, - order_by: Optional[ModelBundleOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], 
Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListModelBundlesV1Response: - """List Model Bundles - - Lists the ModelBundles owned by the current owner. - - :param model_name: - :type model_name: str - :param order_by: - :type order_by: ModelBundleOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_model_bundles_v1_model_bundles_get_serialize( - model_name=model_name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListModelBundlesV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def list_model_bundles_v1_model_bundles_get_with_http_info( - self, - model_name: Optional[StrictStr] = None, - order_by: Optional[ModelBundleOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListModelBundlesV1Response]: - """List Model Bundles - - Lists the ModelBundles owned by the current owner. - - :param model_name: - :type model_name: str - :param order_by: - :type order_by: ModelBundleOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_model_bundles_v1_model_bundles_get_serialize( - model_name=model_name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListModelBundlesV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def list_model_bundles_v1_model_bundles_get_without_preload_content( - self, - model_name: Optional[StrictStr] = None, - order_by: Optional[ModelBundleOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List Model Bundles - - Lists the ModelBundles owned by the current owner. - - :param model_name: - :type model_name: str - :param order_by: - :type order_by: ModelBundleOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_model_bundles_v1_model_bundles_get_serialize( - model_name=model_name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListModelBundlesV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _list_model_bundles_v1_model_bundles_get_serialize( - self, - model_name, - order_by, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model_name is not None: - _query_params.append(("model_name", model_name)) - - if order_by is not 
None: - _query_params.append(("order_by", order_by.value)) - - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/model-bundles", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def list_model_bundles_v2_model_bundles_get( - self, - model_name: Optional[StrictStr] = None, - order_by: Optional[ModelBundleOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListModelBundlesV2Response: - """List Model Bundles - - Lists the ModelBundles owned by the current owner. - - :param model_name: - :type model_name: str - :param order_by: - :type order_by: ModelBundleOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_model_bundles_v2_model_bundles_get_serialize( - model_name=model_name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListModelBundlesV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def list_model_bundles_v2_model_bundles_get_with_http_info( - self, - model_name: Optional[StrictStr] = None, - order_by: Optional[ModelBundleOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListModelBundlesV2Response]: - """List Model Bundles - - Lists the ModelBundles owned by the current owner. - - :param model_name: - :type model_name: str - :param order_by: - :type order_by: ModelBundleOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_model_bundles_v2_model_bundles_get_serialize( - model_name=model_name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListModelBundlesV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def list_model_bundles_v2_model_bundles_get_without_preload_content( - self, - model_name: Optional[StrictStr] = None, - order_by: Optional[ModelBundleOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, 
le=0)] = 0, - ) -> RESTResponseType: - """List Model Bundles - - Lists the ModelBundles owned by the current owner. - - :param model_name: - :type model_name: str - :param order_by: - :type order_by: ModelBundleOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_model_bundles_v2_model_bundles_get_serialize( - model_name=model_name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListModelBundlesV2Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _list_model_bundles_v2_model_bundles_get_serialize( - self, - model_name, - order_by, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model_name is not None: - _query_params.append(("model_name", model_name)) - - if order_by is not None: - _query_params.append(("order_by", order_by.value)) - - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v2/model-bundles", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def list_model_endpoints_v1_llm_model_endpoints_get( - self, - name: 
Optional[StrictStr] = None, - order_by: Optional[ModelEndpointOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListLLMModelEndpointsV1Response: - """List Model Endpoints - - Lists the LLM model endpoints owned by the current owner, plus all public_inference LLMs. - - :param name: - :type name: str - :param order_by: - :type order_by: ModelEndpointOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_model_endpoints_v1_llm_model_endpoints_get_serialize( - name=name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListLLMModelEndpointsV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def list_model_endpoints_v1_llm_model_endpoints_get_with_http_info( - self, - name: Optional[StrictStr] = None, - order_by: Optional[ModelEndpointOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListLLMModelEndpointsV1Response]: - """List Model Endpoints - - Lists the LLM model endpoints owned by the current owner, plus all public_inference LLMs. - - :param name: - :type name: str - :param order_by: - :type order_by: ModelEndpointOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_model_endpoints_v1_llm_model_endpoints_get_serialize( - name=name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListLLMModelEndpointsV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def list_model_endpoints_v1_llm_model_endpoints_get_without_preload_content( - self, - name: Optional[StrictStr] = None, - order_by: Optional[ModelEndpointOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List Model Endpoints - - Lists the LLM model endpoints owned by the current owner, plus all public_inference LLMs. - - :param name: - :type name: str - :param order_by: - :type order_by: ModelEndpointOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_model_endpoints_v1_llm_model_endpoints_get_serialize( - name=name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListLLMModelEndpointsV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _list_model_endpoints_v1_llm_model_endpoints_get_serialize( - self, - name, - order_by, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if name is not None: - _query_params.append(("name", name)) - - if order_by is not None: - 
_query_params.append(("order_by", order_by.value)) - - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/llm/model-endpoints", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def list_model_endpoints_v1_model_endpoints_get( - self, - name: Optional[StrictStr] = None, - order_by: Optional[ModelEndpointOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListModelEndpointsV1Response: - """List Model Endpoints - - Lists the Models owned by the current owner. - - :param name: - :type name: str - :param order_by: - :type order_by: ModelEndpointOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_model_endpoints_v1_model_endpoints_get_serialize( - name=name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListModelEndpointsV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def list_model_endpoints_v1_model_endpoints_get_with_http_info( - self, - name: Optional[StrictStr] = None, - order_by: Optional[ModelEndpointOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListModelEndpointsV1Response]: - """List Model Endpoints - - Lists the Models owned by the current owner. - - :param name: - :type name: str - :param order_by: - :type order_by: ModelEndpointOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_model_endpoints_v1_model_endpoints_get_serialize( - name=name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListModelEndpointsV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def list_model_endpoints_v1_model_endpoints_get_without_preload_content( - self, - name: Optional[StrictStr] = None, - order_by: Optional[ModelEndpointOrderBy] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, 
- ) -> RESTResponseType: - """List Model Endpoints - - Lists the Models owned by the current owner. - - :param name: - :type name: str - :param order_by: - :type order_by: ModelEndpointOrderBy - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_model_endpoints_v1_model_endpoints_get_serialize( - name=name, - order_by=order_by, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListModelEndpointsV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _list_model_endpoints_v1_model_endpoints_get_serialize( - self, - name, - order_by, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if name is not None: - _query_params.append(("name", name)) - - if order_by is not None: - _query_params.append(("order_by", order_by.value)) - - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/model-endpoints", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def list_triggers_v1_triggers_get( - self, - _request_timeout: Union[ - None, - 
Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListTriggersV1Response: - """List Triggers - - Lists descriptions of all triggers - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_triggers_v1_triggers_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListTriggersV1Response", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def list_triggers_v1_triggers_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListTriggersV1Response]: - """List Triggers - - Lists descriptions of all triggers - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_triggers_v1_triggers_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListTriggersV1Response", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def list_triggers_v1_triggers_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List Triggers - - Lists descriptions of all triggers - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_triggers_v1_triggers_get_serialize( - _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListTriggersV1Response", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _list_triggers_v1_triggers_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/v1/triggers", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - batch_job_id: StrictStr, - 
update_batch_job_v1_request: UpdateBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> UpdateBatchJobV1Response: - """Update Batch Job - - Updates a batch job. - - :param batch_job_id: (required) - :type batch_job_id: str - :param update_batch_job_v1_request: (required) - :type update_batch_job_v1_request: UpdateBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._update_batch_job_v1_batch_jobs_batch_job_id_put_serialize( - batch_job_id=batch_job_id, - update_batch_job_v1_request=update_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def update_batch_job_v1_batch_jobs_batch_job_id_put_with_http_info( - self, - batch_job_id: StrictStr, - update_batch_job_v1_request: UpdateBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[UpdateBatchJobV1Response]: - """Update Batch Job - - Updates a batch job. - - :param batch_job_id: (required) - :type batch_job_id: str - :param update_batch_job_v1_request: (required) - :type update_batch_job_v1_request: UpdateBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_batch_job_v1_batch_jobs_batch_job_id_put_serialize( - batch_job_id=batch_job_id, - update_batch_job_v1_request=update_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def update_batch_job_v1_batch_jobs_batch_job_id_put_without_preload_content( - self, - batch_job_id: StrictStr, - update_batch_job_v1_request: UpdateBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Update Batch Job - - Updates a batch job. - - :param batch_job_id: (required) - :type batch_job_id: str - :param update_batch_job_v1_request: (required) - :type update_batch_job_v1_request: UpdateBatchJobV1Request - :param _request_timeout: timeout setting for this request. 
If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_batch_job_v1_batch_jobs_batch_job_id_put_serialize( - batch_job_id=batch_job_id, - update_batch_job_v1_request=update_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _update_batch_job_v1_batch_jobs_batch_job_id_put_serialize( - self, - batch_job_id, - update_batch_job_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path 
parameters - if batch_job_id is not None: - _path_params["batch_job_id"] = batch_job_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if update_batch_job_v1_request is not None: - _body_params = update_batch_job_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="PUT", - resource_path="/v1/batch-jobs/{batch_job_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - batch_job_id: StrictStr, - update_docker_image_batch_job_v1_request: UpdateDockerImageBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> UpdateDockerImageBatchJobV1Response: - """Update Docker Image Batch Job - - - :param batch_job_id: (required) - :type batch_job_id: str - :param update_docker_image_batch_job_v1_request: (required) 
- :type update_docker_image_batch_job_v1_request: UpdateDockerImageBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_serialize( - batch_job_id=batch_job_id, - update_docker_image_batch_job_v1_request=update_docker_image_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateDockerImageBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_with_http_info( - self, - batch_job_id: StrictStr, - update_docker_image_batch_job_v1_request: UpdateDockerImageBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[UpdateDockerImageBatchJobV1Response]: - """Update Docker Image Batch Job - - - :param batch_job_id: (required) - :type batch_job_id: str - :param update_docker_image_batch_job_v1_request: (required) - :type update_docker_image_batch_job_v1_request: UpdateDockerImageBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_serialize( - batch_job_id=batch_job_id, - update_docker_image_batch_job_v1_request=update_docker_image_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateDockerImageBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_without_preload_content( - self, - batch_job_id: StrictStr, - update_docker_image_batch_job_v1_request: UpdateDockerImageBatchJobV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - 
_headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Update Docker Image Batch Job - - - :param batch_job_id: (required) - :type batch_job_id: str - :param update_docker_image_batch_job_v1_request: (required) - :type update_docker_image_batch_job_v1_request: UpdateDockerImageBatchJobV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_serialize( - batch_job_id=batch_job_id, - update_docker_image_batch_job_v1_request=update_docker_image_batch_job_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateDockerImageBatchJobV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_serialize( - self, - batch_job_id, - update_docker_image_batch_job_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if batch_job_id is not None: - _path_params["batch_job_id"] = batch_job_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if update_docker_image_batch_job_v1_request is not None: - _body_params = update_docker_image_batch_job_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication 
setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="PUT", - resource_path="/v1/docker-image-batch-jobs/{batch_job_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( - self, - model_endpoint_name: StrictStr, - update_llm_model_endpoint_v1_request: UpdateLLMModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> UpdateLLMModelEndpointV1Response: - """Update Model Endpoint - - Updates an LLM endpoint for the current user. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param update_llm_model_endpoint_v1_request: (required) - :type update_llm_model_endpoint_v1_request: UpdateLLMModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_serialize( - model_endpoint_name=model_endpoint_name, - update_llm_model_endpoint_v1_request=update_llm_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateLLMModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_with_http_info( - self, - model_endpoint_name: StrictStr, - update_llm_model_endpoint_v1_request: UpdateLLMModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[UpdateLLMModelEndpointV1Response]: - """Update Model Endpoint - - Updates an LLM endpoint for the current user. 
- - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param update_llm_model_endpoint_v1_request: (required) - :type update_llm_model_endpoint_v1_request: UpdateLLMModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_serialize( - model_endpoint_name=model_endpoint_name, - update_llm_model_endpoint_v1_request=update_llm_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateLLMModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_without_preload_content( - self, - model_endpoint_name: StrictStr, - update_llm_model_endpoint_v1_request: UpdateLLMModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Update Model Endpoint - - Updates an LLM endpoint for the current user. - - :param model_endpoint_name: (required) - :type model_endpoint_name: str - :param update_llm_model_endpoint_v1_request: (required) - :type update_llm_model_endpoint_v1_request: UpdateLLMModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_serialize( - model_endpoint_name=model_endpoint_name, - update_llm_model_endpoint_v1_request=update_llm_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateLLMModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_serialize( - self, - model_endpoint_name, - update_llm_model_endpoint_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if model_endpoint_name 
is not None: - _path_params["model_endpoint_name"] = model_endpoint_name - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if update_llm_model_endpoint_v1_request is not None: - _body_params = update_llm_model_endpoint_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="PUT", - resource_path="/v1/llm/model-endpoints/{model_endpoint_name}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - model_endpoint_id: StrictStr, - update_model_endpoint_v1_request: UpdateModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> UpdateModelEndpointV1Response: - """Update Model Endpoint - - Lists the Models owned by the current owner. 
- - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param update_model_endpoint_v1_request: (required) - :type update_model_endpoint_v1_request: UpdateModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_serialize( - model_endpoint_id=model_endpoint_id, - update_model_endpoint_v1_request=update_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_with_http_info( - self, - model_endpoint_id: StrictStr, - update_model_endpoint_v1_request: UpdateModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[UpdateModelEndpointV1Response]: - """Update Model Endpoint - - Lists the Models owned by the current owner. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param update_model_endpoint_v1_request: (required) - :type update_model_endpoint_v1_request: UpdateModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_serialize( - model_endpoint_id=model_endpoint_id, - update_model_endpoint_v1_request=update_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_without_preload_content( - self, - model_endpoint_id: StrictStr, - update_model_endpoint_v1_request: UpdateModelEndpointV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, 
- _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Update Model Endpoint - - Lists the Models owned by the current owner. - - :param model_endpoint_id: (required) - :type model_endpoint_id: str - :param update_model_endpoint_v1_request: (required) - :type update_model_endpoint_v1_request: UpdateModelEndpointV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_serialize( - model_endpoint_id=model_endpoint_id, - update_model_endpoint_v1_request=update_model_endpoint_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateModelEndpointV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_serialize( - self, - model_endpoint_id, - update_model_endpoint_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if model_endpoint_id is not None: - _path_params["model_endpoint_id"] = model_endpoint_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if update_model_endpoint_v1_request is not None: - _body_params = update_model_endpoint_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = 
["HTTPBasic"] - - return self.api_client.param_serialize( - method="PUT", - resource_path="/v1/model-endpoints/{model_endpoint_id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def update_trigger_v1_triggers_trigger_id_put( - self, - trigger_id: StrictStr, - update_trigger_v1_request: UpdateTriggerV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> UpdateTriggerV1Response: - """Update Trigger - - Updates the trigger with the given ID - - :param trigger_id: (required) - :type trigger_id: str - :param update_trigger_v1_request: (required) - :type update_trigger_v1_request: UpdateTriggerV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_trigger_v1_triggers_trigger_id_put_serialize( - trigger_id=trigger_id, - update_trigger_v1_request=update_trigger_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def update_trigger_v1_triggers_trigger_id_put_with_http_info( - self, - trigger_id: StrictStr, - update_trigger_v1_request: UpdateTriggerV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[UpdateTriggerV1Response]: - """Update Trigger - - Updates the trigger with the given ID - - :param trigger_id: (required) - :type trigger_id: str - :param update_trigger_v1_request: (required) - :type update_trigger_v1_request: UpdateTriggerV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_trigger_v1_triggers_trigger_id_put_serialize( - trigger_id=trigger_id, - update_trigger_v1_request=update_trigger_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def update_trigger_v1_triggers_trigger_id_put_without_preload_content( - self, - trigger_id: StrictStr, - update_trigger_v1_request: UpdateTriggerV1Request, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - 
"""Update Trigger - - Updates the trigger with the given ID - - :param trigger_id: (required) - :type trigger_id: str - :param update_trigger_v1_request: (required) - :type update_trigger_v1_request: UpdateTriggerV1Request - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._update_trigger_v1_triggers_trigger_id_put_serialize( - trigger_id=trigger_id, - update_trigger_v1_request=update_trigger_v1_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UpdateTriggerV1Response", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _update_trigger_v1_triggers_trigger_id_put_serialize( - self, - trigger_id, - update_trigger_v1_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if trigger_id is not None: - _path_params["trigger_id"] = trigger_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if update_trigger_v1_request is not None: - _body_params = update_trigger_v1_request - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["application/json"]) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="PUT", - resource_path="/v1/triggers/{trigger_id}", - 
path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - def upload_file_v1_files_post( - self, - file: Union[StrictBytes, StrictStr], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> UploadFileResponse: - """Upload File - - - :param file: (required) - :type file: bytearray - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._upload_file_v1_files_post_serialize( - file=file, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UploadFileResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - def upload_file_v1_files_post_with_http_info( - self, - file: Union[StrictBytes, StrictStr], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[UploadFileResponse]: - """Upload File - - - :param file: (required) - :type file: bytearray - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._upload_file_v1_files_post_serialize( - file=file, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UploadFileResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - def upload_file_v1_files_post_without_preload_content( - self, - file: Union[StrictBytes, StrictStr], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)]], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Upload File - - - :param file: (required) - :type file: bytearray - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._upload_file_v1_files_post_serialize( - file=file, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "UploadFileResponse", - "422": "HTTPValidationError", - } - response_data = self.api_client.call_api(*_param, _request_timeout=_request_timeout) - return response_data.response - - def _upload_file_v1_files_post_serialize( - self, - file, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[str, Union[str, bytes]] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - if file is not None: - _files["file"] = file - # process the body parameter - - # set the HTTP header `Accept` - _header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type(["multipart/form-data"]) - if _default_content_type is not None: - _header_params["Content-Type"] = 
_default_content_type - - # authentication setting - _auth_settings: List[str] = ["HTTPBasic"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/v1/files", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/launch/api_client/api_client.py b/launch/api_client/api_client.py deleted file mode 100644 index a417ae3f..00000000 --- a/launch/api_client/api_client.py +++ /dev/null @@ -1,1499 +0,0 @@ -# coding: utf-8 -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import atexit -import email -import enum -import io -import json -import os -import re -import tempfile -import typing -from dataclasses import dataclass -from decimal import Decimal -from multiprocessing.pool import ThreadPool -from urllib.parse import quote, urlparse - -import frozendict -import typing_extensions -import urllib3 -from urllib3._collections import HTTPHeaderDict -from urllib3.fields import RequestField as RequestFieldBase - -from launch.api_client import rest -from launch.api_client.configuration import Configuration -from launch.api_client.exceptions import ApiTypeError, ApiValueError -from launch.api_client.schemas import ( - BinarySchema, - BoolClass, - FileIO, - NoneClass, - Schema, - Unset, - date, - datetime, - none_type, - unset, -) - - -class RequestField(RequestFieldBase): - def __eq__(self, other): - if not isinstance(other, RequestField): - return False - return self.__dict__ == other.__dict__ - - -class JSONEncoder(json.JSONEncoder): - compact_separators = (',', ':') - - def default(self, obj): - if isinstance(obj, str): - return 
str(obj) - elif isinstance(obj, float): - return float(obj) - elif isinstance(obj, int): - return int(obj) - elif isinstance(obj, Decimal): - if obj.as_tuple().exponent >= 0: - return int(obj) - return float(obj) - elif isinstance(obj, NoneClass): - return None - elif isinstance(obj, BoolClass): - return bool(obj) - elif isinstance(obj, (dict, frozendict.frozendict)): - return {key: self.default(val) for key, val in obj.items()} - elif isinstance(obj, (list, tuple)): - return [self.default(item) for item in obj] - raise ApiValueError('Unable to prepare type {} for serialization'.format(obj.__class__.__name__)) - - -class ParameterInType(enum.Enum): - QUERY = 'query' - HEADER = 'header' - PATH = 'path' - COOKIE = 'cookie' - - -class ParameterStyle(enum.Enum): - MATRIX = 'matrix' - LABEL = 'label' - FORM = 'form' - SIMPLE = 'simple' - SPACE_DELIMITED = 'spaceDelimited' - PIPE_DELIMITED = 'pipeDelimited' - DEEP_OBJECT = 'deepObject' - - -class PrefixSeparatorIterator: - # A class to store prefixes and separators for rfc6570 expansions - - def __init__(self, prefix: str, separator: str): - self.prefix = prefix - self.separator = separator - self.first = True - if separator in {'.', '|', '%20'}: - item_separator = separator - else: - item_separator = ',' - self.item_separator = item_separator - - def __iter__(self): - return self - - def __next__(self): - if self.first: - self.first = False - return self.prefix - return self.separator - - -class ParameterSerializerBase: - @classmethod - def _get_default_explode(cls, style: ParameterStyle) -> bool: - return False - - @staticmethod - def __ref6570_item_value(in_data: typing.Any, percent_encode: bool): - """ - Get representation if str/float/int/None/items in list/ values in dict - None is returned if an item is undefined, use cases are value= - - None - - [] - - {} - - [None, None None] - - {'a': None, 'b': None} - """ - if type(in_data) in {str, float, int}: - if percent_encode: - return quote(str(in_data)) - return 
str(in_data) - elif isinstance(in_data, none_type): - # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 - return None - elif isinstance(in_data, list) and not in_data: - # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 - return None - elif isinstance(in_data, dict) and not in_data: - # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 - return None - raise ApiValueError('Unable to generate a ref6570 item representation of {}'.format(in_data)) - - @staticmethod - def _to_dict(name: str, value: str): - return {name: value} - - @classmethod - def __ref6570_str_float_int_expansion( - cls, - variable_name: str, - in_data: typing.Any, - explode: bool, - percent_encode: bool, - prefix_separator_iterator: PrefixSeparatorIterator, - var_name_piece: str, - named_parameter_expansion: bool - ) -> str: - item_value = cls.__ref6570_item_value(in_data, percent_encode) - if item_value is None or (item_value == '' and prefix_separator_iterator.separator == ';'): - return next(prefix_separator_iterator) + var_name_piece - value_pair_equals = '=' if named_parameter_expansion else '' - return next(prefix_separator_iterator) + var_name_piece + value_pair_equals + item_value - - @classmethod - def __ref6570_list_expansion( - cls, - variable_name: str, - in_data: typing.Any, - explode: bool, - percent_encode: bool, - prefix_separator_iterator: PrefixSeparatorIterator, - var_name_piece: str, - named_parameter_expansion: bool - ) -> str: - item_values = [cls.__ref6570_item_value(v, percent_encode) for v in in_data] - item_values = [v for v in item_values if v is not None] - if not item_values: - # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 - return "" - value_pair_equals = '=' if named_parameter_expansion else '' - if not explode: - return ( - next(prefix_separator_iterator) + - var_name_piece + - 
value_pair_equals + - prefix_separator_iterator.item_separator.join(item_values) - ) - # exploded - return next(prefix_separator_iterator) + next(prefix_separator_iterator).join( - [var_name_piece + value_pair_equals + val for val in item_values] - ) - - @classmethod - def __ref6570_dict_expansion( - cls, - variable_name: str, - in_data: typing.Any, - explode: bool, - percent_encode: bool, - prefix_separator_iterator: PrefixSeparatorIterator, - var_name_piece: str, - named_parameter_expansion: bool - ) -> str: - in_data_transformed = {key: cls.__ref6570_item_value(val, percent_encode) for key, val in in_data.items()} - in_data_transformed = {key: val for key, val in in_data_transformed.items() if val is not None} - if not in_data_transformed: - # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 - return "" - value_pair_equals = '=' if named_parameter_expansion else '' - if not explode: - return ( - next(prefix_separator_iterator) + - var_name_piece + value_pair_equals + - prefix_separator_iterator.item_separator.join( - prefix_separator_iterator.item_separator.join( - item_pair - ) for item_pair in in_data_transformed.items() - ) - ) - # exploded - return next(prefix_separator_iterator) + next(prefix_separator_iterator).join( - [key + '=' + val for key, val in in_data_transformed.items()] - ) - - @classmethod - def _ref6570_expansion( - cls, - variable_name: str, - in_data: typing.Any, - explode: bool, - percent_encode: bool, - prefix_separator_iterator: PrefixSeparatorIterator - ) -> str: - """ - Separator is for separate variables like dict with explode true, not for array item separation - """ - named_parameter_expansion = prefix_separator_iterator.separator in {'&', ';'} - var_name_piece = variable_name if named_parameter_expansion else '' - if type(in_data) in {str, float, int}: - return cls.__ref6570_str_float_int_expansion( - variable_name, - in_data, - explode, - percent_encode, - prefix_separator_iterator, - 
var_name_piece, - named_parameter_expansion - ) - elif isinstance(in_data, none_type): - # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 - return "" - elif isinstance(in_data, list): - return cls.__ref6570_list_expansion( - variable_name, - in_data, - explode, - percent_encode, - prefix_separator_iterator, - var_name_piece, - named_parameter_expansion - ) - elif isinstance(in_data, dict): - return cls.__ref6570_dict_expansion( - variable_name, - in_data, - explode, - percent_encode, - prefix_separator_iterator, - var_name_piece, - named_parameter_expansion - ) - # bool, bytes, etc - raise ApiValueError('Unable to generate a ref6570 representation of {}'.format(in_data)) - - -class StyleFormSerializer(ParameterSerializerBase): - @classmethod - def _get_default_explode(cls, style: ParameterStyle) -> bool: - if style is ParameterStyle.FORM: - return True - return super()._get_default_explode(style) - - def _serialize_form( - self, - in_data: typing.Union[None, int, float, str, bool, dict, list], - name: str, - explode: bool, - percent_encode: bool, - prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None - ) -> str: - if prefix_separator_iterator is None: - prefix_separator_iterator = PrefixSeparatorIterator('', '&') - return self._ref6570_expansion( - variable_name=name, - in_data=in_data, - explode=explode, - percent_encode=percent_encode, - prefix_separator_iterator=prefix_separator_iterator - ) - - -class StyleSimpleSerializer(ParameterSerializerBase): - - def _serialize_simple( - self, - in_data: typing.Union[None, int, float, str, bool, dict, list], - name: str, - explode: bool, - percent_encode: bool - ) -> str: - prefix_separator_iterator = PrefixSeparatorIterator('', ',') - return self._ref6570_expansion( - variable_name=name, - in_data=in_data, - explode=explode, - percent_encode=percent_encode, - prefix_separator_iterator=prefix_separator_iterator - ) - - -class JSONDetector: - """ - Works 
for: - application/json - application/json; charset=UTF-8 - application/json-patch+json - application/geo+json - """ - __json_content_type_pattern = re.compile("application/[^+]*[+]?(json);?.*") - - @classmethod - def _content_type_is_json(cls, content_type: str) -> bool: - if cls.__json_content_type_pattern.match(content_type): - return True - return False - - -@dataclass -class ParameterBase(JSONDetector): - name: str - in_type: ParameterInType - required: bool - style: typing.Optional[ParameterStyle] - explode: typing.Optional[bool] - allow_reserved: typing.Optional[bool] - schema: typing.Optional[typing.Type[Schema]] - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] - - __style_to_in_type = { - ParameterStyle.MATRIX: {ParameterInType.PATH}, - ParameterStyle.LABEL: {ParameterInType.PATH}, - ParameterStyle.FORM: {ParameterInType.QUERY, ParameterInType.COOKIE}, - ParameterStyle.SIMPLE: {ParameterInType.PATH, ParameterInType.HEADER}, - ParameterStyle.SPACE_DELIMITED: {ParameterInType.QUERY}, - ParameterStyle.PIPE_DELIMITED: {ParameterInType.QUERY}, - ParameterStyle.DEEP_OBJECT: {ParameterInType.QUERY}, - } - __in_type_to_default_style = { - ParameterInType.QUERY: ParameterStyle.FORM, - ParameterInType.PATH: ParameterStyle.SIMPLE, - ParameterInType.HEADER: ParameterStyle.SIMPLE, - ParameterInType.COOKIE: ParameterStyle.FORM, - } - __disallowed_header_names = {'Accept', 'Content-Type', 'Authorization'} - _json_encoder = JSONEncoder() - - @classmethod - def __verify_style_to_in_type(cls, style: typing.Optional[ParameterStyle], in_type: ParameterInType): - if style is None: - return - in_type_set = cls.__style_to_in_type[style] - if in_type not in in_type_set: - raise ValueError( - 'Invalid style and in_type combination. 
For style={} only in_type={} are allowed'.format( - style, in_type_set - ) - ) - - def __init__( - self, - name: str, - in_type: ParameterInType, - required: bool = False, - style: typing.Optional[ParameterStyle] = None, - explode: bool = False, - allow_reserved: typing.Optional[bool] = None, - schema: typing.Optional[typing.Type[Schema]] = None, - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None - ): - if schema is None and content is None: - raise ValueError('Value missing; Pass in either schema or content') - if schema and content: - raise ValueError('Too many values provided. Both schema and content were provided. Only one may be input') - if name in self.__disallowed_header_names and in_type is ParameterInType.HEADER: - raise ValueError('Invalid name, name may not be one of {}'.format(self.__disallowed_header_names)) - self.__verify_style_to_in_type(style, in_type) - if content is None and style is None: - style = self.__in_type_to_default_style[in_type] - if content is not None and in_type in self.__in_type_to_default_style and len(content) != 1: - raise ValueError('Invalid content length, content length must equal 1') - self.in_type = in_type - self.name = name - self.required = required - self.style = style - self.explode = explode - self.allow_reserved = allow_reserved - self.schema = schema - self.content = content - - def _serialize_json( - self, - in_data: typing.Union[None, int, float, str, bool, dict, list], - eliminate_whitespace: bool = False - ) -> str: - if eliminate_whitespace: - return json.dumps(in_data, separators=self._json_encoder.compact_separators) - return json.dumps(in_data) - - -class PathParameter(ParameterBase, StyleSimpleSerializer): - - def __init__( - self, - name: str, - required: bool = False, - style: typing.Optional[ParameterStyle] = None, - explode: bool = False, - allow_reserved: typing.Optional[bool] = None, - schema: typing.Optional[typing.Type[Schema]] = None, - content: 
typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None - ): - super().__init__( - name, - in_type=ParameterInType.PATH, - required=required, - style=style, - explode=explode, - allow_reserved=allow_reserved, - schema=schema, - content=content - ) - - def __serialize_label( - self, - in_data: typing.Union[None, int, float, str, bool, dict, list] - ) -> typing.Dict[str, str]: - prefix_separator_iterator = PrefixSeparatorIterator('.', '.') - value = self._ref6570_expansion( - variable_name=self.name, - in_data=in_data, - explode=self.explode, - percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator - ) - return self._to_dict(self.name, value) - - def __serialize_matrix( - self, - in_data: typing.Union[None, int, float, str, bool, dict, list] - ) -> typing.Dict[str, str]: - prefix_separator_iterator = PrefixSeparatorIterator(';', ';') - value = self._ref6570_expansion( - variable_name=self.name, - in_data=in_data, - explode=self.explode, - percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator - ) - return self._to_dict(self.name, value) - - def __serialize_simple( - self, - in_data: typing.Union[None, int, float, str, bool, dict, list], - ) -> typing.Dict[str, str]: - value = self._serialize_simple( - in_data=in_data, - name=self.name, - explode=self.explode, - percent_encode=True - ) - return self._to_dict(self.name, value) - - def serialize( - self, - in_data: typing.Union[ - Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict] - ) -> typing.Dict[str, str]: - if self.schema: - cast_in_data = self.schema(in_data) - cast_in_data = self._json_encoder.default(cast_in_data) - """ - simple -> path - path: - returns path_params: dict - label -> path - returns path_params - matrix -> path - returns path_params - """ - if self.style: - if self.style is ParameterStyle.SIMPLE: - return self.__serialize_simple(cast_in_data) - elif self.style is ParameterStyle.LABEL: - return 
self.__serialize_label(cast_in_data) - elif self.style is ParameterStyle.MATRIX: - return self.__serialize_matrix(cast_in_data) - # self.content will be length one - for content_type, schema in self.content.items(): - cast_in_data = schema(in_data) - cast_in_data = self._json_encoder.default(cast_in_data) - if self._content_type_is_json(content_type): - value = self._serialize_json(cast_in_data) - return self._to_dict(self.name, value) - raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type)) - - -class QueryParameter(ParameterBase, StyleFormSerializer): - - def __init__( - self, - name: str, - required: bool = False, - style: typing.Optional[ParameterStyle] = None, - explode: typing.Optional[bool] = None, - allow_reserved: typing.Optional[bool] = None, - schema: typing.Optional[typing.Type[Schema]] = None, - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None - ): - used_style = ParameterStyle.FORM if style is None else style - used_explode = self._get_default_explode(used_style) if explode is None else explode - - super().__init__( - name, - in_type=ParameterInType.QUERY, - required=required, - style=used_style, - explode=used_explode, - allow_reserved=allow_reserved, - schema=schema, - content=content - ) - - def __serialize_space_delimited( - self, - in_data: typing.Union[None, int, float, str, bool, dict, list], - prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] - ) -> typing.Dict[str, str]: - if prefix_separator_iterator is None: - prefix_separator_iterator = self.get_prefix_separator_iterator() - value = self._ref6570_expansion( - variable_name=self.name, - in_data=in_data, - explode=self.explode, - percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator - ) - return self._to_dict(self.name, value) - - def __serialize_pipe_delimited( - self, - in_data: typing.Union[None, int, float, str, bool, dict, list], - prefix_separator_iterator: 
typing.Optional[PrefixSeparatorIterator] - ) -> typing.Dict[str, str]: - if prefix_separator_iterator is None: - prefix_separator_iterator = self.get_prefix_separator_iterator() - value = self._ref6570_expansion( - variable_name=self.name, - in_data=in_data, - explode=self.explode, - percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator - ) - return self._to_dict(self.name, value) - - def __serialize_form( - self, - in_data: typing.Union[None, int, float, str, bool, dict, list], - prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] - ) -> typing.Dict[str, str]: - if prefix_separator_iterator is None: - prefix_separator_iterator = self.get_prefix_separator_iterator() - value = self._serialize_form( - in_data, - name=self.name, - explode=self.explode, - percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator - ) - return self._to_dict(self.name, value) - - def get_prefix_separator_iterator(self) -> typing.Optional[PrefixSeparatorIterator]: - if self.style is ParameterStyle.FORM: - return PrefixSeparatorIterator('?', '&') - elif self.style is ParameterStyle.SPACE_DELIMITED: - return PrefixSeparatorIterator('', '%20') - elif self.style is ParameterStyle.PIPE_DELIMITED: - return PrefixSeparatorIterator('', '|') - - def serialize( - self, - in_data: typing.Union[ - Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict], - prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None - ) -> typing.Dict[str, str]: - if self.schema: - cast_in_data = self.schema(in_data) - cast_in_data = self._json_encoder.default(cast_in_data) - """ - form -> query - query: - - GET/HEAD/DELETE: could use fields - - PUT/POST: must use urlencode to send parameters - returns fields: tuple - spaceDelimited -> query - returns fields - pipeDelimited -> query - returns fields - deepObject -> query, https://github.com/OAI/OpenAPI-Specification/issues/1706 - returns fields - """ - if 
self.style: - # TODO update query ones to omit setting values when [] {} or None is input - if self.style is ParameterStyle.FORM: - return self.__serialize_form(cast_in_data, prefix_separator_iterator) - elif self.style is ParameterStyle.SPACE_DELIMITED: - return self.__serialize_space_delimited(cast_in_data, prefix_separator_iterator) - elif self.style is ParameterStyle.PIPE_DELIMITED: - return self.__serialize_pipe_delimited(cast_in_data, prefix_separator_iterator) - # self.content will be length one - if prefix_separator_iterator is None: - prefix_separator_iterator = self.get_prefix_separator_iterator() - for content_type, schema in self.content.items(): - cast_in_data = schema(in_data) - cast_in_data = self._json_encoder.default(cast_in_data) - if self._content_type_is_json(content_type): - value = self._serialize_json(cast_in_data, eliminate_whitespace=True) - return self._to_dict( - self.name, - next(prefix_separator_iterator) + self.name + '=' + quote(value) - ) - raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type)) - - -class CookieParameter(ParameterBase, StyleFormSerializer): - - def __init__( - self, - name: str, - required: bool = False, - style: typing.Optional[ParameterStyle] = None, - explode: typing.Optional[bool] = None, - allow_reserved: typing.Optional[bool] = None, - schema: typing.Optional[typing.Type[Schema]] = None, - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None - ): - used_style = ParameterStyle.FORM if style is None and content is None and schema else style - used_explode = self._get_default_explode(used_style) if explode is None else explode - - super().__init__( - name, - in_type=ParameterInType.COOKIE, - required=required, - style=used_style, - explode=used_explode, - allow_reserved=allow_reserved, - schema=schema, - content=content - ) - - def serialize( - self, - in_data: typing.Union[ - Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, 
dict, frozendict.frozendict] - ) -> typing.Dict[str, str]: - if self.schema: - cast_in_data = self.schema(in_data) - cast_in_data = self._json_encoder.default(cast_in_data) - """ - form -> cookie - returns fields: tuple - """ - if self.style: - """ - TODO add escaping of comma, space, equals - or turn encoding on - """ - value = self._serialize_form( - cast_in_data, - explode=self.explode, - name=self.name, - percent_encode=False, - prefix_separator_iterator=PrefixSeparatorIterator('', '&') - ) - return self._to_dict(self.name, value) - # self.content will be length one - for content_type, schema in self.content.items(): - cast_in_data = schema(in_data) - cast_in_data = self._json_encoder.default(cast_in_data) - if self._content_type_is_json(content_type): - value = self._serialize_json(cast_in_data) - return self._to_dict(self.name, value) - raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type)) - - -class HeaderParameter(ParameterBase, StyleSimpleSerializer): - def __init__( - self, - name: str, - required: bool = False, - style: typing.Optional[ParameterStyle] = None, - explode: bool = False, - allow_reserved: typing.Optional[bool] = None, - schema: typing.Optional[typing.Type[Schema]] = None, - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None - ): - super().__init__( - name, - in_type=ParameterInType.HEADER, - required=required, - style=style, - explode=explode, - allow_reserved=allow_reserved, - schema=schema, - content=content - ) - - @staticmethod - def __to_headers(in_data: typing.Tuple[typing.Tuple[str, str], ...]) -> HTTPHeaderDict: - data = tuple(t for t in in_data if t) - headers = HTTPHeaderDict() - if not data: - return headers - headers.extend(data) - return headers - - def serialize( - self, - in_data: typing.Union[ - Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict] - ) -> HTTPHeaderDict: - if self.schema: - cast_in_data = 
self.schema(in_data) - cast_in_data = self._json_encoder.default(cast_in_data) - """ - simple -> header - headers: PoolManager needs a mapping, tuple is close - returns headers: dict - """ - if self.style: - value = self._serialize_simple(cast_in_data, self.name, self.explode, False) - return self.__to_headers(((self.name, value),)) - # self.content will be length one - for content_type, schema in self.content.items(): - cast_in_data = schema(in_data) - cast_in_data = self._json_encoder.default(cast_in_data) - if self._content_type_is_json(content_type): - value = self._serialize_json(cast_in_data) - return self.__to_headers(((self.name, value),)) - raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type)) - - -class Encoding: - def __init__( - self, - content_type: str, - headers: typing.Optional[typing.Dict[str, HeaderParameter]] = None, - style: typing.Optional[ParameterStyle] = None, - explode: bool = False, - allow_reserved: bool = False, - ): - self.content_type = content_type - self.headers = headers - self.style = style - self.explode = explode - self.allow_reserved = allow_reserved - - -@dataclass -class MediaType: - """ - Used to store request and response body schema information - encoding: - A map between a property name and its encoding information. - The key, being the property name, MUST exist in the schema as a property. - The encoding object SHALL only apply to requestBody objects when the media type is - multipart or application/x-www-form-urlencoded. 
- """ - schema: typing.Optional[typing.Type[Schema]] = None - encoding: typing.Optional[typing.Dict[str, Encoding]] = None - - -@dataclass -class ApiResponse: - response: urllib3.HTTPResponse - body: typing.Union[Unset, Schema] = unset - headers: typing.Union[Unset, typing.Dict[str, Schema]] = unset - - def __init__( - self, - response: urllib3.HTTPResponse, - body: typing.Union[Unset, Schema] = unset, - headers: typing.Union[Unset, typing.Dict[str, Schema]] = unset - ): - """ - pycharm needs this to prevent 'Unexpected argument' warnings - """ - self.response = response - self.body = body - self.headers = headers - - -@dataclass -class ApiResponseWithoutDeserialization(ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[Unset, typing.Type[Schema]] = unset - headers: typing.Union[Unset, typing.List[HeaderParameter]] = unset - - -class OpenApiResponse(JSONDetector): - __filename_content_disposition_pattern = re.compile('filename="(.+?)"') - - def __init__( - self, - response_cls: typing.Type[ApiResponse] = ApiResponse, - content: typing.Optional[typing.Dict[str, MediaType]] = None, - headers: typing.Optional[typing.List[HeaderParameter]] = None, - ): - self.headers = headers - if content is not None and len(content) == 0: - raise ValueError('Invalid value for content, the content dict must have >= 1 entry') - self.content = content - self.response_cls = response_cls - - @staticmethod - def __deserialize_json(response: urllib3.HTTPResponse) -> typing.Any: - # python must be >= 3.9 so we can pass in bytes into json.loads - return json.loads(response.data) - - @staticmethod - def __file_name_from_response_url(response_url: typing.Optional[str]) -> typing.Optional[str]: - if response_url is None: - return None - url_path = urlparse(response_url).path - if url_path: - path_basename = os.path.basename(url_path) - if path_basename: - _filename, ext = os.path.splitext(path_basename) - if ext: - return path_basename - return None - - @classmethod - def 
__file_name_from_content_disposition(cls, content_disposition: typing.Optional[str]) -> typing.Optional[str]: - if content_disposition is None: - return None - match = cls.__filename_content_disposition_pattern.search(content_disposition) - if not match: - return None - return match.group(1) - - def __deserialize_application_octet_stream( - self, response: urllib3.HTTPResponse - ) -> typing.Union[bytes, io.BufferedReader]: - """ - urllib3 use cases: - 1. when preload_content=True (stream=False) then supports_chunked_reads is False and bytes are returned - 2. when preload_content=False (stream=True) then supports_chunked_reads is True and - a file will be written and returned - """ - if response.supports_chunked_reads(): - file_name = ( - self.__file_name_from_content_disposition(response.headers.get('content-disposition')) - or self.__file_name_from_response_url(response.geturl()) - ) - - if file_name is None: - _fd, path = tempfile.mkstemp() - else: - path = os.path.join(tempfile.gettempdir(), file_name) - - with open(path, 'wb') as new_file: - chunk_size = 1024 - while True: - data = response.read(chunk_size) - if not data: - break - new_file.write(data) - # release_conn is needed for streaming connections only - response.release_conn() - new_file = open(path, 'rb') - return new_file - else: - return response.data - - @staticmethod - def __deserialize_multipart_form_data( - response: urllib3.HTTPResponse - ) -> typing.Dict[str, typing.Any]: - msg = email.message_from_bytes(response.data) - return { - part.get_param("name", header="Content-Disposition"): part.get_payload( - decode=True - ).decode(part.get_content_charset()) - if part.get_content_charset() - else part.get_payload() - for part in msg.get_payload() - } - - def deserialize(self, response: urllib3.HTTPResponse, configuration: Configuration) -> ApiResponse: - content_type = response.getheader('content-type') - deserialized_body = unset - streamed = response.supports_chunked_reads() - - 
deserialized_headers = unset - if self.headers is not None: - # TODO add header deserialiation here - pass - - if self.content is not None: - if content_type not in self.content: - raise ApiValueError( - f"Invalid content_type returned. Content_type='{content_type}' was returned " - f"when only {str(set(self.content))} are defined for status_code={str(response.status)}" - ) - body_schema = self.content[content_type].schema - if body_schema is None: - # some specs do not define response content media type schemas - return self.response_cls( - response=response, - headers=deserialized_headers, - body=unset - ) - - if self._content_type_is_json(content_type): - body_data = self.__deserialize_json(response) - elif content_type == 'application/octet-stream': - body_data = self.__deserialize_application_octet_stream(response) - elif content_type.startswith('multipart/form-data'): - body_data = self.__deserialize_multipart_form_data(response) - content_type = 'multipart/form-data' - else: - raise NotImplementedError('Deserialization of {} has not yet been implemented'.format(content_type)) - deserialized_body = body_schema.from_openapi_data_oapg( - body_data, _configuration=configuration) - elif streamed: - response.release_conn() - - return self.response_cls( - response=response, - headers=deserialized_headers, - body=deserialized_body - ) - - -class ApiClient: - """Generic API client for OpenAPI client library builds. - - OpenAPI generic API client. This client handles the client- - server communication, and is invariant across implementations. Specifics of - the methods and models for each application are generated from the OpenAPI - templates. - - NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - Do not edit the class manually. - - :param configuration: .Configuration object for this client - :param header_name: a header to pass when making calls to the API. 
- :param header_value: a header value to pass when making calls to - the API. - :param cookie: a cookie to include in the header when making calls - to the API - :param pool_threads: The number of threads to use for async requests - to the API. More threads means more concurrent API requests. - """ - - _pool = None - - def __init__( - self, - configuration: typing.Optional[Configuration] = None, - header_name: typing.Optional[str] = None, - header_value: typing.Optional[str] = None, - cookie: typing.Optional[str] = None, - pool_threads: int = 1 - ): - if configuration is None: - configuration = Configuration() - self.configuration = configuration - self.pool_threads = pool_threads - - self.rest_client = rest.RESTClientObject(configuration) - self.default_headers = HTTPHeaderDict() - if header_name is not None: - self.default_headers[header_name] = header_value - self.cookie = cookie - # Set default User-Agent. - self.user_agent = 'OpenAPI-Generator/1.0.0/python' - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - - def close(self): - if self._pool: - self._pool.close() - self._pool.join() - self._pool = None - if hasattr(atexit, 'unregister'): - atexit.unregister(self.close) - - @property - def pool(self): - """Create thread pool on first request - avoids instantiating unused threadpool for blocking clients. 
- """ - if self._pool is None: - atexit.register(self.close) - self._pool = ThreadPool(self.pool_threads) - return self._pool - - @property - def user_agent(self): - """User agent for this API client""" - return self.default_headers['User-Agent'] - - @user_agent.setter - def user_agent(self, value): - self.default_headers['User-Agent'] = value - - def set_default_header(self, header_name, header_value): - self.default_headers[header_name] = header_value - - def __call_api( - self, - resource_path: str, - method: str, - headers: typing.Optional[HTTPHeaderDict] = None, - body: typing.Optional[typing.Union[str, bytes]] = None, - fields: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None, - auth_settings: typing.Optional[typing.List[str]] = None, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - host: typing.Optional[str] = None, - ) -> urllib3.HTTPResponse: - - # header parameters - used_headers = HTTPHeaderDict(self.default_headers) - if self.cookie: - headers['Cookie'] = self.cookie - - # auth setting - self.update_params_for_auth(used_headers, - auth_settings, resource_path, method, body) - - # must happen after cookie setting and auth setting in case user is overriding those - if headers: - used_headers.update(headers) - - # request url - if host is None: - url = self.configuration.host + resource_path - else: - # use server/host defined in path or operation instead - url = host + resource_path - - # perform request and return response - response = self.request( - method, - url, - headers=used_headers, - fields=fields, - body=body, - stream=stream, - timeout=timeout, - ) - return response - - def call_api( - self, - resource_path: str, - method: str, - headers: typing.Optional[HTTPHeaderDict] = None, - body: typing.Optional[typing.Union[str, bytes]] = None, - fields: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None, - auth_settings: typing.Optional[typing.List[str]] = None, - async_req: 
typing.Optional[bool] = None, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - host: typing.Optional[str] = None, - ) -> urllib3.HTTPResponse: - """Makes the HTTP request (synchronous) and returns deserialized data. - - To make an async_req request, set the async_req parameter. - - :param resource_path: Path to method endpoint. - :param method: Method to call. - :param headers: Header parameters to be - placed in the request header. - :param body: Request body. - :param fields: Request post form parameters, - for `application/x-www-form-urlencoded`, `multipart/form-data`. - :param auth_settings: Auth Settings names for the request. - :param async_req: execute request asynchronously - :type async_req: bool, optional TODO remove, unused - :param stream: if True, the urllib3.HTTPResponse object will - be returned without reading/decoding response - data. Also when True, if the openapi spec describes a file download, - the data will be written to a local filesystme file and the BinarySchema - instance will also inherit from FileSchema and FileIO - Default is False. - :type stream: bool, optional - :param timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :param host: api endpoint host - :return: - If async_req parameter is True, - the request will be called asynchronously. - The method will return the request thread. - If parameter async_req is False or missing, - then the method will return the response directly. 
- """ - - if not async_req: - return self.__call_api( - resource_path, - method, - headers, - body, - fields, - auth_settings, - stream, - timeout, - host, - ) - - return self.pool.apply_async( - self.__call_api, - ( - resource_path, - method, - headers, - body, - json, - fields, - auth_settings, - stream, - timeout, - host, - ) - ) - - def request( - self, - method: str, - url: str, - headers: typing.Optional[HTTPHeaderDict] = None, - fields: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None, - body: typing.Optional[typing.Union[str, bytes]] = None, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> urllib3.HTTPResponse: - """Makes the HTTP request using RESTClient.""" - if method == "GET": - return self.rest_client.GET(url, - stream=stream, - timeout=timeout, - headers=headers) - elif method == "HEAD": - return self.rest_client.HEAD(url, - stream=stream, - timeout=timeout, - headers=headers) - elif method == "OPTIONS": - return self.rest_client.OPTIONS(url, - headers=headers, - fields=fields, - stream=stream, - timeout=timeout, - body=body) - elif method == "POST": - return self.rest_client.POST(url, - headers=headers, - fields=fields, - stream=stream, - timeout=timeout, - body=body) - elif method == "PUT": - return self.rest_client.PUT(url, - headers=headers, - fields=fields, - stream=stream, - timeout=timeout, - body=body) - elif method == "PATCH": - return self.rest_client.PATCH(url, - headers=headers, - fields=fields, - stream=stream, - timeout=timeout, - body=body) - elif method == "DELETE": - return self.rest_client.DELETE(url, - headers=headers, - stream=stream, - timeout=timeout, - body=body) - else: - raise ApiValueError( - "http method must be `GET`, `HEAD`, `OPTIONS`," - " `POST`, `PATCH`, `PUT` or `DELETE`." - ) - - def update_params_for_auth(self, headers, auth_settings, - resource_path, method, body): - """Updates header and query params based on authentication setting. 
- - :param headers: Header parameters dict to be updated. - :param auth_settings: Authentication setting identifiers list. - :param resource_path: A string representation of the HTTP request resource path. - :param method: A string representation of the HTTP request method. - :param body: A object representing the body of the HTTP request. - The object type is the return value of _encoder.default(). - """ - if not auth_settings: - return - - for auth in auth_settings: - auth_setting = self.configuration.auth_settings().get(auth) - if not auth_setting: - continue - if auth_setting['in'] == 'cookie': - headers.add('Cookie', auth_setting['value']) - elif auth_setting['in'] == 'header': - if auth_setting['type'] != 'http-signature': - headers.add(auth_setting['key'], auth_setting['value']) - elif auth_setting['in'] == 'query': - """ TODO implement auth in query - need to pass in prefix_separator_iterator - and need to output resource_path with query params added - """ - raise ApiValueError("Auth in query not yet implemented") - else: - raise ApiValueError( - 'Authentication token must be in `query` or `header`' - ) - - -class Api: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - def __init__(self, api_client: typing.Optional[ApiClient] = None): - if api_client is None: - api_client = ApiClient() - self.api_client = api_client - - @staticmethod - def _verify_typed_dict_inputs_oapg(cls: typing.Type[typing_extensions.TypedDict], data: typing.Dict[str, typing.Any]): - """ - Ensures that: - - required keys are present - - additional properties are not input - - value stored under required keys do not have the value unset - Note: detailed value checking is done in schema classes - """ - missing_required_keys = [] - required_keys_with_unset_values = [] - for required_key in cls.__required_keys__: - if required_key not in data: - missing_required_keys.append(required_key) - continue - value = data[required_key] - if value is unset: - required_keys_with_unset_values.append(required_key) - if missing_required_keys: - raise ApiTypeError( - '{} missing {} required arguments: {}'.format( - cls.__name__, len(missing_required_keys), missing_required_keys - ) - ) - if required_keys_with_unset_values: - raise ApiValueError( - '{} contains invalid unset values for {} required keys: {}'.format( - cls.__name__, len(required_keys_with_unset_values), required_keys_with_unset_values - ) - ) - - disallowed_additional_keys = [] - for key in data: - if key in cls.__required_keys__ or key in cls.__optional_keys__: - continue - disallowed_additional_keys.append(key) - if disallowed_additional_keys: - raise ApiTypeError( - '{} got {} unexpected keyword arguments: {}'.format( - cls.__name__, len(disallowed_additional_keys), disallowed_additional_keys - ) - ) - - def _get_host_oapg( - self, - operation_id: str, - servers: typing.Tuple[typing.Dict[str, str], ...] 
= tuple(), - host_index: typing.Optional[int] = None - ) -> typing.Optional[str]: - configuration = self.api_client.configuration - try: - if host_index is None: - index = configuration.server_operation_index.get( - operation_id, configuration.server_index - ) - else: - index = host_index - server_variables = configuration.server_operation_variables.get( - operation_id, configuration.server_variables - ) - host = configuration.get_host_from_settings( - index, variables=server_variables, servers=servers - ) - except IndexError: - if servers: - raise ApiValueError( - "Invalid host index. Must be 0 <= index < %s" % - len(servers) - ) - host = None - return host - - -class SerializedRequestBody(typing_extensions.TypedDict, total=False): - body: typing.Union[str, bytes] - fields: typing.Tuple[typing.Union[RequestField, typing.Tuple[str, str]], ...] - - -class RequestBody(StyleFormSerializer, JSONDetector): - """ - A request body parameter - content: content_type to MediaType Schema info - """ - __json_encoder = JSONEncoder() - - def __init__( - self, - content: typing.Dict[str, MediaType], - required: bool = False, - ): - self.required = required - if len(content) == 0: - raise ValueError('Invalid value for content, the content dict must have >= 1 entry') - self.content = content - - def __serialize_json( - self, - in_data: typing.Any - ) -> typing.Dict[str, bytes]: - in_data = self.__json_encoder.default(in_data) - json_str = json.dumps(in_data, separators=(",", ":"), ensure_ascii=False).encode( - "utf-8" - ) - return dict(body=json_str) - - @staticmethod - def __serialize_text_plain(in_data: typing.Any) -> typing.Dict[str, str]: - if isinstance(in_data, frozendict.frozendict): - raise ValueError('Unable to serialize type frozendict.frozendict to text/plain') - elif isinstance(in_data, tuple): - raise ValueError('Unable to serialize type tuple to text/plain') - elif isinstance(in_data, NoneClass): - raise ValueError('Unable to serialize type NoneClass to text/plain') - 
elif isinstance(in_data, BoolClass): - raise ValueError('Unable to serialize type BoolClass to text/plain') - return dict(body=str(in_data)) - - def __multipart_json_item(self, key: str, value: Schema) -> RequestField: - json_value = self.__json_encoder.default(value) - request_field = RequestField(name=key, data=json.dumps(json_value)) - request_field.make_multipart(content_type='application/json') - return request_field - - def __multipart_form_item(self, key: str, value: Schema) -> RequestField: - if isinstance(value, str): - request_field = RequestField(name=key, data=str(value)) - request_field.make_multipart(content_type='text/plain') - elif isinstance(value, bytes): - request_field = RequestField(name=key, data=value) - request_field.make_multipart(content_type='application/octet-stream') - elif isinstance(value, FileIO): - # TODO use content.encoding to limit allowed content types if they are present - request_field = RequestField.from_tuples(key, (os.path.basename(value.name), value.read())) - value.close() - else: - request_field = self.__multipart_json_item(key=key, value=value) - return request_field - - def __serialize_multipart_form_data( - self, in_data: Schema - ) -> typing.Dict[str, typing.Tuple[RequestField, ...]]: - if not isinstance(in_data, frozendict.frozendict): - raise ValueError(f'Unable to serialize {in_data} to multipart/form-data because it is not a dict of data') - """ - In a multipart/form-data request body, each schema property, or each element of a schema array property, - takes a section in the payload with an internal header as defined by RFC7578. The serialization strategy - for each property of a multipart/form-data request body can be specified in an associated Encoding Object. 
- - When passing in multipart types, boundaries MAY be used to separate sections of the content being - transferred – thus, the following default Content-Types are defined for multipart: - - If the (object) property is a primitive, or an array of primitive values, the default Content-Type is text/plain - If the property is complex, or an array of complex values, the default Content-Type is application/json - Question: how is the array of primitives encoded? - If the property is a type: string with a contentEncoding, the default Content-Type is application/octet-stream - """ - fields = [] - for key, value in in_data.items(): - if isinstance(value, tuple): - if value: - # values use explode = True, so the code makes a RequestField for each item with name=key - for item in value: - request_field = self.__multipart_form_item(key=key, value=item) - fields.append(request_field) - else: - # send an empty array as json because exploding will not send it - request_field = self.__multipart_json_item(key=key, value=value) - fields.append(request_field) - else: - request_field = self.__multipart_form_item(key=key, value=value) - fields.append(request_field) - - return dict(fields=tuple(fields)) - - def __serialize_application_octet_stream(self, in_data: BinarySchema) -> typing.Dict[str, bytes]: - if isinstance(in_data, bytes): - return dict(body=in_data) - # FileIO type - result = dict(body=in_data.read()) - in_data.close() - return result - - def __serialize_application_x_www_form_data( - self, in_data: typing.Any - ) -> SerializedRequestBody: - """ - POST submission of form data in body - """ - if not isinstance(in_data, frozendict.frozendict): - raise ValueError( - f'Unable to serialize {in_data} to application/x-www-form-urlencoded because it is not a dict of data') - cast_in_data = self.__json_encoder.default(in_data) - value = self._serialize_form(cast_in_data, name='', explode=True, percent_encode=True) - return dict(body=value) - - def serialize( - self, in_data: 
typing.Any, content_type: str - ) -> SerializedRequestBody: - """ - If a str is returned then the result will be assigned to data when making the request - If a tuple is returned then the result will be used as fields input in encode_multipart_formdata - Return a tuple of - - The key of the return dict is - - body for application/json - - encode_multipart and fields for multipart/form-data - """ - media_type = self.content[content_type] - if isinstance(in_data, media_type.schema): - cast_in_data = in_data - elif isinstance(in_data, (dict, frozendict.frozendict)) and in_data: - cast_in_data = media_type.schema(**in_data) - else: - cast_in_data = media_type.schema(in_data) - # TODO check for and use encoding if it exists - # and content_type is multipart or application/x-www-form-urlencoded - if self._content_type_is_json(content_type): - return self.__serialize_json(cast_in_data) - elif content_type == 'text/plain': - return self.__serialize_text_plain(cast_in_data) - elif content_type == 'multipart/form-data': - return self.__serialize_multipart_form_data(cast_in_data) - elif content_type == 'application/x-www-form-urlencoded': - return self.__serialize_application_x_www_form_data(cast_in_data) - elif content_type == 'application/octet-stream': - return self.__serialize_application_octet_stream(cast_in_data) - raise NotImplementedError('Serialization has not yet been implemented for {}'.format(content_type)) \ No newline at end of file diff --git a/launch/api_client/api_response.py b/launch/api_client/api_response.py deleted file mode 100644 index ca801da0..00000000 --- a/launch/api_client/api_response.py +++ /dev/null @@ -1,22 +0,0 @@ -"""API response object.""" - -from __future__ import annotations - -from typing import Generic, Mapping, Optional, TypeVar - -from pydantic import BaseModel, Field, StrictBytes, StrictInt - -T = TypeVar("T") - - -class ApiResponse(BaseModel, Generic[T]): - """ - API response object - """ - - status_code: StrictInt = 
Field(description="HTTP status code") - headers: Optional[Mapping[str, str]] = Field(None, description="HTTP headers") - data: T = Field(description="Deserialized data given the data type") - raw_data: StrictBytes = Field(description="Raw data (HTTP response body)") - - model_config = {"arbitrary_types_allowed": True} diff --git a/launch/api_client/apis/__init__.py b/launch/api_client/apis/__init__.py deleted file mode 100644 index 7840f772..00000000 --- a/launch/api_client/apis/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints then import them from -# tags, paths, or path_to_api, or tag_to_api \ No newline at end of file diff --git a/launch/api_client/apis/path_to_api.py b/launch/api_client/apis/path_to_api.py deleted file mode 100644 index 5bc8d9ce..00000000 --- a/launch/api_client/apis/path_to_api.py +++ /dev/null @@ -1,217 +0,0 @@ -import typing_extensions - -from launch.api_client.apis.paths.healthcheck import Healthcheck -from launch.api_client.apis.paths.healthz import Healthz -from launch.api_client.apis.paths.readyz import Readyz -from launch.api_client.apis.paths.v1_async_tasks import V1AsyncTasks -from launch.api_client.apis.paths.v1_async_tasks_task_id import ( - V1AsyncTasksTaskId, -) -from launch.api_client.apis.paths.v1_batch_jobs import V1BatchJobs -from launch.api_client.apis.paths.v1_batch_jobs_batch_job_id import ( - V1BatchJobsBatchJobId, -) -from launch.api_client.apis.paths.v1_docker_image_batch_job_bundles import ( - V1DockerImageBatchJobBundles, -) -from launch.api_client.apis.paths.v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id import ( - V1DockerImageBatchJobBundlesDockerImageBatchJobBundleId, -) -from launch.api_client.apis.paths.v1_docker_image_batch_job_bundles_latest import ( - V1DockerImageBatchJobBundlesLatest, -) -from launch.api_client.apis.paths.v1_docker_image_batch_jobs 
import ( - V1DockerImageBatchJobs, -) -from launch.api_client.apis.paths.v1_docker_image_batch_jobs_batch_job_id import ( - V1DockerImageBatchJobsBatchJobId, -) -from launch.api_client.apis.paths.v1_files import V1Files -from launch.api_client.apis.paths.v1_files_file_id import V1FilesFileId -from launch.api_client.apis.paths.v1_files_file_id_content import ( - V1FilesFileIdContent, -) -from launch.api_client.apis.paths.v1_llm_batch_completions import ( - V1LlmBatchCompletions, -) -from launch.api_client.apis.paths.v1_llm_completions_stream import ( - V1LlmCompletionsStream, -) -from launch.api_client.apis.paths.v1_llm_completions_sync import ( - V1LlmCompletionsSync, -) -from launch.api_client.apis.paths.v1_llm_fine_tunes import V1LlmFineTunes -from launch.api_client.apis.paths.v1_llm_fine_tunes_fine_tune_id import ( - V1LlmFineTunesFineTuneId, -) -from launch.api_client.apis.paths.v1_llm_fine_tunes_fine_tune_id_cancel import ( - V1LlmFineTunesFineTuneIdCancel, -) -from launch.api_client.apis.paths.v1_llm_fine_tunes_fine_tune_id_events import ( - V1LlmFineTunesFineTuneIdEvents, -) -from launch.api_client.apis.paths.v1_llm_model_endpoints import ( - V1LlmModelEndpoints, -) -from launch.api_client.apis.paths.v1_llm_model_endpoints_download import ( - V1LlmModelEndpointsDownload, -) -from launch.api_client.apis.paths.v1_llm_model_endpoints_model_endpoint_name import ( - V1LlmModelEndpointsModelEndpointName, -) -from launch.api_client.apis.paths.v1_model_bundles import V1ModelBundles -from launch.api_client.apis.paths.v1_model_bundles_clone_with_changes import ( - V1ModelBundlesCloneWithChanges, -) -from launch.api_client.apis.paths.v1_model_bundles_latest import ( - V1ModelBundlesLatest, -) -from launch.api_client.apis.paths.v1_model_bundles_model_bundle_id import ( - V1ModelBundlesModelBundleId, -) -from launch.api_client.apis.paths.v1_model_endpoints import V1ModelEndpoints -from launch.api_client.apis.paths.v1_model_endpoints_api import ( - V1ModelEndpointsApi, -) 
-from launch.api_client.apis.paths.v1_model_endpoints_model_endpoint_id import ( - V1ModelEndpointsModelEndpointId, -) -from launch.api_client.apis.paths.v1_model_endpoints_model_endpoint_id_restart import ( - V1ModelEndpointsModelEndpointIdRestart, -) -from launch.api_client.apis.paths.v1_model_endpoints_schema_json import ( - V1ModelEndpointsSchemaJson, -) -from launch.api_client.apis.paths.v1_streaming_tasks import V1StreamingTasks -from launch.api_client.apis.paths.v1_sync_tasks import V1SyncTasks -from launch.api_client.apis.paths.v1_triggers import V1Triggers -from launch.api_client.apis.paths.v1_triggers_trigger_id import ( - V1TriggersTriggerId, -) -from launch.api_client.apis.paths.v2_batch_completions import ( - V2BatchCompletions, -) -from launch.api_client.apis.paths.v2_batch_completions_batch_completion_id import ( - V2BatchCompletionsBatchCompletionId, -) -from launch.api_client.apis.paths.v2_batch_completions_batch_completion_id_actions_cancel import ( - V2BatchCompletionsBatchCompletionIdActionsCancel, -) -from launch.api_client.apis.paths.v2_chat_completions import V2ChatCompletions -from launch.api_client.apis.paths.v2_completions import V2Completions -from launch.api_client.apis.paths.v2_model_bundles import V2ModelBundles -from launch.api_client.apis.paths.v2_model_bundles_clone_with_changes import ( - V2ModelBundlesCloneWithChanges, -) -from launch.api_client.apis.paths.v2_model_bundles_latest import ( - V2ModelBundlesLatest, -) -from launch.api_client.apis.paths.v2_model_bundles_model_bundle_id import ( - V2ModelBundlesModelBundleId, -) -from launch.api_client.paths import PathValues - -PathToApi = typing_extensions.TypedDict( - 'PathToApi', - { - PathValues.V1_BATCHJOBS: V1BatchJobs, - PathValues.V1_BATCHJOBS_BATCH_JOB_ID: V1BatchJobsBatchJobId, - PathValues.V1_DOCKERIMAGEBATCHJOBS: V1DockerImageBatchJobs, - PathValues.V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID: V1DockerImageBatchJobsBatchJobId, - PathValues.V1_ASYNCTASKS: V1AsyncTasks, - 
PathValues.V1_ASYNCTASKS_TASK_ID: V1AsyncTasksTaskId, - PathValues.V1_SYNCTASKS: V1SyncTasks, - PathValues.V1_STREAMINGTASKS: V1StreamingTasks, - PathValues.V1_MODELBUNDLES: V1ModelBundles, - PathValues.V1_MODELBUNDLES_CLONEWITHCHANGES: V1ModelBundlesCloneWithChanges, - PathValues.V1_MODELBUNDLES_LATEST: V1ModelBundlesLatest, - PathValues.V1_MODELBUNDLES_MODEL_BUNDLE_ID: V1ModelBundlesModelBundleId, - PathValues.V2_MODELBUNDLES: V2ModelBundles, - PathValues.V2_MODELBUNDLES_CLONEWITHCHANGES: V2ModelBundlesCloneWithChanges, - PathValues.V2_MODELBUNDLES_LATEST: V2ModelBundlesLatest, - PathValues.V2_MODELBUNDLES_MODEL_BUNDLE_ID: V2ModelBundlesModelBundleId, - PathValues.V1_MODELENDPOINTS: V1ModelEndpoints, - PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID: V1ModelEndpointsModelEndpointId, - PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID_RESTART: V1ModelEndpointsModelEndpointIdRestart, - PathValues.V1_MODELENDPOINTSSCHEMA_JSON: V1ModelEndpointsSchemaJson, - PathValues.V1_MODELENDPOINTSAPI: V1ModelEndpointsApi, - PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES: V1DockerImageBatchJobBundles, - PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST: V1DockerImageBatchJobBundlesLatest, - PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID: V1DockerImageBatchJobBundlesDockerImageBatchJobBundleId, - PathValues.V1_LLM_MODELENDPOINTS: V1LlmModelEndpoints, - PathValues.V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME: V1LlmModelEndpointsModelEndpointName, - PathValues.V1_LLM_COMPLETIONSSYNC: V1LlmCompletionsSync, - PathValues.V1_LLM_COMPLETIONSSTREAM: V1LlmCompletionsStream, - PathValues.V1_LLM_FINETUNES: V1LlmFineTunes, - PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID: V1LlmFineTunesFineTuneId, - PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL: V1LlmFineTunesFineTuneIdCancel, - PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS: V1LlmFineTunesFineTuneIdEvents, - PathValues.V1_LLM_MODELENDPOINTS_DOWNLOAD: V1LlmModelEndpointsDownload, - PathValues.V1_LLM_BATCHCOMPLETIONS: 
V1LlmBatchCompletions, - PathValues.V1_FILES: V1Files, - PathValues.V1_FILES_FILE_ID: V1FilesFileId, - PathValues.V1_FILES_FILE_ID_CONTENT: V1FilesFileIdContent, - PathValues.V1_TRIGGERS: V1Triggers, - PathValues.V1_TRIGGERS_TRIGGER_ID: V1TriggersTriggerId, - PathValues.V2_BATCHCOMPLETIONS: V2BatchCompletions, - PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID: V2BatchCompletionsBatchCompletionId, - PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID_ACTIONS_CANCEL: V2BatchCompletionsBatchCompletionIdActionsCancel, - PathValues.V2_CHAT_COMPLETIONS: V2ChatCompletions, - PathValues.V2_COMPLETIONS: V2Completions, - PathValues.HEALTHCHECK: Healthcheck, - PathValues.HEALTHZ: Healthz, - PathValues.READYZ: Readyz, - } -) - -path_to_api = PathToApi( - { - PathValues.V1_BATCHJOBS: V1BatchJobs, - PathValues.V1_BATCHJOBS_BATCH_JOB_ID: V1BatchJobsBatchJobId, - PathValues.V1_DOCKERIMAGEBATCHJOBS: V1DockerImageBatchJobs, - PathValues.V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID: V1DockerImageBatchJobsBatchJobId, - PathValues.V1_ASYNCTASKS: V1AsyncTasks, - PathValues.V1_ASYNCTASKS_TASK_ID: V1AsyncTasksTaskId, - PathValues.V1_SYNCTASKS: V1SyncTasks, - PathValues.V1_STREAMINGTASKS: V1StreamingTasks, - PathValues.V1_MODELBUNDLES: V1ModelBundles, - PathValues.V1_MODELBUNDLES_CLONEWITHCHANGES: V1ModelBundlesCloneWithChanges, - PathValues.V1_MODELBUNDLES_LATEST: V1ModelBundlesLatest, - PathValues.V1_MODELBUNDLES_MODEL_BUNDLE_ID: V1ModelBundlesModelBundleId, - PathValues.V2_MODELBUNDLES: V2ModelBundles, - PathValues.V2_MODELBUNDLES_CLONEWITHCHANGES: V2ModelBundlesCloneWithChanges, - PathValues.V2_MODELBUNDLES_LATEST: V2ModelBundlesLatest, - PathValues.V2_MODELBUNDLES_MODEL_BUNDLE_ID: V2ModelBundlesModelBundleId, - PathValues.V1_MODELENDPOINTS: V1ModelEndpoints, - PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID: V1ModelEndpointsModelEndpointId, - PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID_RESTART: V1ModelEndpointsModelEndpointIdRestart, - PathValues.V1_MODELENDPOINTSSCHEMA_JSON: 
V1ModelEndpointsSchemaJson, - PathValues.V1_MODELENDPOINTSAPI: V1ModelEndpointsApi, - PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES: V1DockerImageBatchJobBundles, - PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST: V1DockerImageBatchJobBundlesLatest, - PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID: V1DockerImageBatchJobBundlesDockerImageBatchJobBundleId, - PathValues.V1_LLM_MODELENDPOINTS: V1LlmModelEndpoints, - PathValues.V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME: V1LlmModelEndpointsModelEndpointName, - PathValues.V1_LLM_COMPLETIONSSYNC: V1LlmCompletionsSync, - PathValues.V1_LLM_COMPLETIONSSTREAM: V1LlmCompletionsStream, - PathValues.V1_LLM_FINETUNES: V1LlmFineTunes, - PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID: V1LlmFineTunesFineTuneId, - PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL: V1LlmFineTunesFineTuneIdCancel, - PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS: V1LlmFineTunesFineTuneIdEvents, - PathValues.V1_LLM_MODELENDPOINTS_DOWNLOAD: V1LlmModelEndpointsDownload, - PathValues.V1_LLM_BATCHCOMPLETIONS: V1LlmBatchCompletions, - PathValues.V1_FILES: V1Files, - PathValues.V1_FILES_FILE_ID: V1FilesFileId, - PathValues.V1_FILES_FILE_ID_CONTENT: V1FilesFileIdContent, - PathValues.V1_TRIGGERS: V1Triggers, - PathValues.V1_TRIGGERS_TRIGGER_ID: V1TriggersTriggerId, - PathValues.V2_BATCHCOMPLETIONS: V2BatchCompletions, - PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID: V2BatchCompletionsBatchCompletionId, - PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID_ACTIONS_CANCEL: V2BatchCompletionsBatchCompletionIdActionsCancel, - PathValues.V2_CHAT_COMPLETIONS: V2ChatCompletions, - PathValues.V2_COMPLETIONS: V2Completions, - PathValues.HEALTHCHECK: Healthcheck, - PathValues.HEALTHZ: Healthz, - PathValues.READYZ: Readyz, - } -) diff --git a/launch/api_client/apis/paths/__init__.py b/launch/api_client/apis/paths/__init__.py deleted file mode 100644 index d856e7a6..00000000 --- a/launch/api_client/apis/paths/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# 
do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.apis.path_to_api import path_to_api diff --git a/launch/api_client/apis/paths/healthcheck.py b/launch/api_client/apis/paths/healthcheck.py deleted file mode 100644 index 6f11f7bb..00000000 --- a/launch/api_client/apis/paths/healthcheck.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.healthcheck.get import ApiForget - - -class Healthcheck( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/healthz.py b/launch/api_client/apis/paths/healthz.py deleted file mode 100644 index 62f2a817..00000000 --- a/launch/api_client/apis/paths/healthz.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.healthz.get import ApiForget - - -class Healthz( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/readyz.py b/launch/api_client/apis/paths/readyz.py deleted file mode 100644 index 35b8e737..00000000 --- a/launch/api_client/apis/paths/readyz.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.readyz.get import ApiForget - - -class Readyz( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_async_tasks.py b/launch/api_client/apis/paths/v1_async_tasks.py deleted file mode 100644 index 74bb2cba..00000000 --- a/launch/api_client/apis/paths/v1_async_tasks.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_async_tasks.post import ApiForpost - - -class V1AsyncTasks( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_async_tasks_task_id.py b/launch/api_client/apis/paths/v1_async_tasks_task_id.py deleted file mode 100644 index 473e4bb4..00000000 --- a/launch/api_client/apis/paths/v1_async_tasks_task_id.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_async_tasks_task_id.get import ApiForget - - -class V1AsyncTasksTaskId( - ApiForget, -): - pass diff --git 
a/launch/api_client/apis/paths/v1_batch_jobs.py b/launch/api_client/apis/paths/v1_batch_jobs.py deleted file mode 100644 index 73b60ebc..00000000 --- a/launch/api_client/apis/paths/v1_batch_jobs.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_batch_jobs.post import ApiForpost - - -class V1BatchJobs( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_batch_jobs_batch_job_id.py b/launch/api_client/apis/paths/v1_batch_jobs_batch_job_id.py deleted file mode 100644 index 40bb9f1a..00000000 --- a/launch/api_client/apis/paths/v1_batch_jobs_batch_job_id.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_batch_jobs_batch_job_id.get import ApiForget -from launch.api_client.paths.v1_batch_jobs_batch_job_id.put import ApiForput - - -class V1BatchJobsBatchJobId( - ApiForget, - ApiForput, -): - pass diff --git a/launch/api_client/apis/paths/v1_docker_image_batch_job_bundles.py b/launch/api_client/apis/paths/v1_docker_image_batch_job_bundles.py deleted file mode 100644 index 9c248c3e..00000000 --- a/launch/api_client/apis/paths/v1_docker_image_batch_job_bundles.py +++ /dev/null @@ -1,13 +0,0 @@ -from launch.api_client.paths.v1_docker_image_batch_job_bundles.get import ( - ApiForget, -) -from launch.api_client.paths.v1_docker_image_batch_job_bundles.post import ( - ApiForpost, -) - - -class V1DockerImageBatchJobBundles( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id.py b/launch/api_client/apis/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id.py deleted file mode 100644 index 1c7b1545..00000000 --- a/launch/api_client/apis/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id.get import ( - ApiForget, -) - - -class 
V1DockerImageBatchJobBundlesDockerImageBatchJobBundleId( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_docker_image_batch_job_bundles_latest.py b/launch/api_client/apis/paths/v1_docker_image_batch_job_bundles_latest.py deleted file mode 100644 index 850fdd78..00000000 --- a/launch/api_client/apis/paths/v1_docker_image_batch_job_bundles_latest.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_docker_image_batch_job_bundles_latest.get import ( - ApiForget, -) - - -class V1DockerImageBatchJobBundlesLatest( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_docker_image_batch_jobs.py b/launch/api_client/apis/paths/v1_docker_image_batch_jobs.py deleted file mode 100644 index 889c0966..00000000 --- a/launch/api_client/apis/paths/v1_docker_image_batch_jobs.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_docker_image_batch_jobs.get import ApiForget -from launch.api_client.paths.v1_docker_image_batch_jobs.post import ApiForpost - - -class V1DockerImageBatchJobs( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_docker_image_batch_jobs_batch_job_id.py b/launch/api_client/apis/paths/v1_docker_image_batch_jobs_batch_job_id.py deleted file mode 100644 index 866ae8c8..00000000 --- a/launch/api_client/apis/paths/v1_docker_image_batch_jobs_batch_job_id.py +++ /dev/null @@ -1,13 +0,0 @@ -from launch.api_client.paths.v1_docker_image_batch_jobs_batch_job_id.get import ( - ApiForget, -) -from launch.api_client.paths.v1_docker_image_batch_jobs_batch_job_id.put import ( - ApiForput, -) - - -class V1DockerImageBatchJobsBatchJobId( - ApiForget, - ApiForput, -): - pass diff --git a/launch/api_client/apis/paths/v1_files.py b/launch/api_client/apis/paths/v1_files.py deleted file mode 100644 index 2df3f0ce..00000000 --- a/launch/api_client/apis/paths/v1_files.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_files.get import ApiForget -from 
launch.api_client.paths.v1_files.post import ApiForpost - - -class V1Files( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_files_file_id.py b/launch/api_client/apis/paths/v1_files_file_id.py deleted file mode 100644 index 136f3cdf..00000000 --- a/launch/api_client/apis/paths/v1_files_file_id.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_files_file_id.delete import ApiFordelete -from launch.api_client.paths.v1_files_file_id.get import ApiForget - - -class V1FilesFileId( - ApiForget, - ApiFordelete, -): - pass diff --git a/launch/api_client/apis/paths/v1_files_file_id_content.py b/launch/api_client/apis/paths/v1_files_file_id_content.py deleted file mode 100644 index daaf5d6f..00000000 --- a/launch/api_client/apis/paths/v1_files_file_id_content.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_files_file_id_content.get import ApiForget - - -class V1FilesFileIdContent( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_batch_completions.py b/launch/api_client/apis/paths/v1_llm_batch_completions.py deleted file mode 100644 index b4e2b537..00000000 --- a/launch/api_client/apis/paths/v1_llm_batch_completions.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_llm_batch_completions.post import ApiForpost - - -class V1LlmBatchCompletions( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_completion_sync.py b/launch/api_client/apis/paths/v1_llm_completion_sync.py deleted file mode 100644 index 3c2dfa29..00000000 --- a/launch/api_client/apis/paths/v1_llm_completion_sync.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_llm_completion_sync.post import ApiForpost - - -class V1LlmCompletionSync( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_completions_stream.py b/launch/api_client/apis/paths/v1_llm_completions_stream.py deleted file mode 100644 index 2eb7b765..00000000 --- 
a/launch/api_client/apis/paths/v1_llm_completions_stream.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_llm_completions_stream.post import ApiForpost - - -class V1LlmCompletionsStream( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_completions_sync.py b/launch/api_client/apis/paths/v1_llm_completions_sync.py deleted file mode 100644 index c4120f96..00000000 --- a/launch/api_client/apis/paths/v1_llm_completions_sync.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_llm_completions_sync.post import ApiForpost - - -class V1LlmCompletionsSync( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_fine_tunes.py b/launch/api_client/apis/paths/v1_llm_fine_tunes.py deleted file mode 100644 index f3e92d35..00000000 --- a/launch/api_client/apis/paths/v1_llm_fine_tunes.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_llm_fine_tunes.get import ApiForget -from launch.api_client.paths.v1_llm_fine_tunes.post import ApiForpost - - -class V1LlmFineTunes( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_fine_tunes_fine_tune_id.py b/launch/api_client/apis/paths/v1_llm_fine_tunes_fine_tune_id.py deleted file mode 100644 index dc62cadb..00000000 --- a/launch/api_client/apis/paths/v1_llm_fine_tunes_fine_tune_id.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id.get import ( - ApiForget, -) - - -class V1LlmFineTunesFineTuneId( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_fine_tunes_fine_tune_id_cancel.py b/launch/api_client/apis/paths/v1_llm_fine_tunes_fine_tune_id_cancel.py deleted file mode 100644 index b342ebf8..00000000 --- a/launch/api_client/apis/paths/v1_llm_fine_tunes_fine_tune_id_cancel.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_cancel.put import ( - ApiForput, -) - - -class V1LlmFineTunesFineTuneIdCancel( - ApiForput, -): - 
pass diff --git a/launch/api_client/apis/paths/v1_llm_fine_tunes_fine_tune_id_events.py b/launch/api_client/apis/paths/v1_llm_fine_tunes_fine_tune_id_events.py deleted file mode 100644 index 6beba5dd..00000000 --- a/launch/api_client/apis/paths/v1_llm_fine_tunes_fine_tune_id_events.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_events.get import ( - ApiForget, -) - - -class V1LlmFineTunesFineTuneIdEvents( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_fine_tunes_model_endpoint_name_events.py b/launch/api_client/apis/paths/v1_llm_fine_tunes_model_endpoint_name_events.py deleted file mode 100644 index eaa328b2..00000000 --- a/launch/api_client/apis/paths/v1_llm_fine_tunes_model_endpoint_name_events.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_llm_fine_tunes_model_endpoint_name_events.get import ( - ApiForget, -) - - -class V1LlmFineTunesModelEndpointNameEvents( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_model_endpoints.py b/launch/api_client/apis/paths/v1_llm_model_endpoints.py deleted file mode 100644 index aa94d9f7..00000000 --- a/launch/api_client/apis/paths/v1_llm_model_endpoints.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_llm_model_endpoints.get import ApiForget -from launch.api_client.paths.v1_llm_model_endpoints.post import ApiForpost - - -class V1LlmModelEndpoints( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_llm_model_endpoints_download.py b/launch/api_client/apis/paths/v1_llm_model_endpoints_download.py deleted file mode 100644 index 0b7b13e5..00000000 --- a/launch/api_client/apis/paths/v1_llm_model_endpoints_download.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_llm_model_endpoints_download.post import ( - ApiForpost, -) - - -class V1LlmModelEndpointsDownload( - ApiForpost, -): - pass diff --git 
a/launch/api_client/apis/paths/v1_llm_model_endpoints_model_endpoint_name.py b/launch/api_client/apis/paths/v1_llm_model_endpoints_model_endpoint_name.py deleted file mode 100644 index 97cab173..00000000 --- a/launch/api_client/apis/paths/v1_llm_model_endpoints_model_endpoint_name.py +++ /dev/null @@ -1,17 +0,0 @@ -from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name.delete import ( - ApiFordelete, -) -from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name.get import ( - ApiForget, -) -from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name.put import ( - ApiForput, -) - - -class V1LlmModelEndpointsModelEndpointName( - ApiForget, - ApiForput, - ApiFordelete, -): - pass diff --git a/launch/api_client/apis/paths/v1_model_bundles.py b/launch/api_client/apis/paths/v1_model_bundles.py deleted file mode 100644 index 205a87dc..00000000 --- a/launch/api_client/apis/paths/v1_model_bundles.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_model_bundles.get import ApiForget -from launch.api_client.paths.v1_model_bundles.post import ApiForpost - - -class V1ModelBundles( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_model_bundles_clone_with_changes.py b/launch/api_client/apis/paths/v1_model_bundles_clone_with_changes.py deleted file mode 100644 index df0644e3..00000000 --- a/launch/api_client/apis/paths/v1_model_bundles_clone_with_changes.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_model_bundles_clone_with_changes.post import ( - ApiForpost, -) - - -class V1ModelBundlesCloneWithChanges( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_model_bundles_latest.py b/launch/api_client/apis/paths/v1_model_bundles_latest.py deleted file mode 100644 index e84f6419..00000000 --- a/launch/api_client/apis/paths/v1_model_bundles_latest.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_model_bundles_latest.get import ApiForget - - 
-class V1ModelBundlesLatest( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_model_bundles_model_bundle_id.py b/launch/api_client/apis/paths/v1_model_bundles_model_bundle_id.py deleted file mode 100644 index 0e646257..00000000 --- a/launch/api_client/apis/paths/v1_model_bundles_model_bundle_id.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_model_bundles_model_bundle_id.get import ( - ApiForget, -) - - -class V1ModelBundlesModelBundleId( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_model_endpoints.py b/launch/api_client/apis/paths/v1_model_endpoints.py deleted file mode 100644 index 070bb511..00000000 --- a/launch/api_client/apis/paths/v1_model_endpoints.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_model_endpoints.get import ApiForget -from launch.api_client.paths.v1_model_endpoints.post import ApiForpost - - -class V1ModelEndpoints( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_model_endpoints_api.py b/launch/api_client/apis/paths/v1_model_endpoints_api.py deleted file mode 100644 index 148b69f8..00000000 --- a/launch/api_client/apis/paths/v1_model_endpoints_api.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_model_endpoints_api.get import ApiForget - - -class V1ModelEndpointsApi( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_model_endpoints_model_endpoint_id.py b/launch/api_client/apis/paths/v1_model_endpoints_model_endpoint_id.py deleted file mode 100644 index 7f4d3e3a..00000000 --- a/launch/api_client/apis/paths/v1_model_endpoints_model_endpoint_id.py +++ /dev/null @@ -1,17 +0,0 @@ -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id.delete import ( - ApiFordelete, -) -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id.get import ( - ApiForget, -) -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id.put import ( - ApiForput, -) - - -class 
V1ModelEndpointsModelEndpointId( - ApiForget, - ApiForput, - ApiFordelete, -): - pass diff --git a/launch/api_client/apis/paths/v1_model_endpoints_model_endpoint_id_restart.py b/launch/api_client/apis/paths/v1_model_endpoints_model_endpoint_id_restart.py deleted file mode 100644 index a0a147c8..00000000 --- a/launch/api_client/apis/paths/v1_model_endpoints_model_endpoint_id_restart.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id_restart.post import ( - ApiForpost, -) - - -class V1ModelEndpointsModelEndpointIdRestart( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_model_endpoints_schema_json.py b/launch/api_client/apis/paths/v1_model_endpoints_schema_json.py deleted file mode 100644 index 7c62f95b..00000000 --- a/launch/api_client/apis/paths/v1_model_endpoints_schema_json.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v1_model_endpoints_schema_json.get import ( - ApiForget, -) - - -class V1ModelEndpointsSchemaJson( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v1_streaming_tasks.py b/launch/api_client/apis/paths/v1_streaming_tasks.py deleted file mode 100644 index 22dd42a4..00000000 --- a/launch/api_client/apis/paths/v1_streaming_tasks.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_streaming_tasks.post import ApiForpost - - -class V1StreamingTasks( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_sync_tasks.py b/launch/api_client/apis/paths/v1_sync_tasks.py deleted file mode 100644 index 6f4bfe5d..00000000 --- a/launch/api_client/apis/paths/v1_sync_tasks.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v1_sync_tasks.post import ApiForpost - - -class V1SyncTasks( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_triggers.py b/launch/api_client/apis/paths/v1_triggers.py deleted file mode 100644 index b7029e59..00000000 --- a/launch/api_client/apis/paths/v1_triggers.py +++ /dev/null @@ 
-1,9 +0,0 @@ -from launch.api_client.paths.v1_triggers.get import ApiForget -from launch.api_client.paths.v1_triggers.post import ApiForpost - - -class V1Triggers( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v1_triggers_trigger_id.py b/launch/api_client/apis/paths/v1_triggers_trigger_id.py deleted file mode 100644 index f8cc0439..00000000 --- a/launch/api_client/apis/paths/v1_triggers_trigger_id.py +++ /dev/null @@ -1,11 +0,0 @@ -from launch.api_client.paths.v1_triggers_trigger_id.delete import ApiFordelete -from launch.api_client.paths.v1_triggers_trigger_id.get import ApiForget -from launch.api_client.paths.v1_triggers_trigger_id.put import ApiForput - - -class V1TriggersTriggerId( - ApiForget, - ApiForput, - ApiFordelete, -): - pass diff --git a/launch/api_client/apis/paths/v2_batch_completions.py b/launch/api_client/apis/paths/v2_batch_completions.py deleted file mode 100644 index bd23de97..00000000 --- a/launch/api_client/apis/paths/v2_batch_completions.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v2_batch_completions.post import ApiForpost - - -class V2BatchCompletions( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id.py b/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id.py deleted file mode 100644 index b2d71d04..00000000 --- a/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id.py +++ /dev/null @@ -1,13 +0,0 @@ -from launch.api_client.paths.v2_batch_completions_batch_completion_id.get import ( - ApiForget, -) -from launch.api_client.paths.v2_batch_completions_batch_completion_id.post import ( - ApiForpost, -) - - -class V2BatchCompletionsBatchCompletionId( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id_actions_cancel.py b/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id_actions_cancel.py deleted file mode 100644 
index 0fc54183..00000000 --- a/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id_actions_cancel.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v2_batch_completions_batch_completion_id_actions_cancel.post import ( - ApiForpost, -) - - -class V2BatchCompletionsBatchCompletionIdActionsCancel( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v2_chat_completions.py b/launch/api_client/apis/paths/v2_chat_completions.py deleted file mode 100644 index 72995ebd..00000000 --- a/launch/api_client/apis/paths/v2_chat_completions.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v2_chat_completions.post import ApiForpost - - -class V2ChatCompletions( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v2_completions.py b/launch/api_client/apis/paths/v2_completions.py deleted file mode 100644 index a2189e8c..00000000 --- a/launch/api_client/apis/paths/v2_completions.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v2_completions.post import ApiForpost - - -class V2Completions( - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v2_model_bundles.py b/launch/api_client/apis/paths/v2_model_bundles.py deleted file mode 100644 index 8545c6f2..00000000 --- a/launch/api_client/apis/paths/v2_model_bundles.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v2_model_bundles.get import ApiForget -from launch.api_client.paths.v2_model_bundles.post import ApiForpost - - -class V2ModelBundles( - ApiForget, - ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v2_model_bundles_clone_with_changes.py b/launch/api_client/apis/paths/v2_model_bundles_clone_with_changes.py deleted file mode 100644 index 959b2207..00000000 --- a/launch/api_client/apis/paths/v2_model_bundles_clone_with_changes.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v2_model_bundles_clone_with_changes.post import ( - ApiForpost, -) - - -class V2ModelBundlesCloneWithChanges( - 
ApiForpost, -): - pass diff --git a/launch/api_client/apis/paths/v2_model_bundles_latest.py b/launch/api_client/apis/paths/v2_model_bundles_latest.py deleted file mode 100644 index befb36ff..00000000 --- a/launch/api_client/apis/paths/v2_model_bundles_latest.py +++ /dev/null @@ -1,7 +0,0 @@ -from launch.api_client.paths.v2_model_bundles_latest.get import ApiForget - - -class V2ModelBundlesLatest( - ApiForget, -): - pass diff --git a/launch/api_client/apis/paths/v2_model_bundles_model_bundle_id.py b/launch/api_client/apis/paths/v2_model_bundles_model_bundle_id.py deleted file mode 100644 index e8ce8945..00000000 --- a/launch/api_client/apis/paths/v2_model_bundles_model_bundle_id.py +++ /dev/null @@ -1,9 +0,0 @@ -from launch.api_client.paths.v2_model_bundles_model_bundle_id.get import ( - ApiForget, -) - - -class V2ModelBundlesModelBundleId( - ApiForget, -): - pass diff --git a/launch/api_client/apis/tag_to_api.py b/launch/api_client/apis/tag_to_api.py deleted file mode 100644 index f2a2dfcf..00000000 --- a/launch/api_client/apis/tag_to_api.py +++ /dev/null @@ -1,17 +0,0 @@ -import typing_extensions - -from launch.api_client.apis.tags import TagValues -from launch.api_client.apis.tags.default_api import DefaultApi - -TagToApi = typing_extensions.TypedDict( - 'TagToApi', - { - TagValues.DEFAULT: DefaultApi, - } -) - -tag_to_api = TagToApi( - { - TagValues.DEFAULT: DefaultApi, - } -) diff --git a/launch/api_client/apis/tags/__init__.py b/launch/api_client/apis/tags/__init__.py deleted file mode 100644 index 5595c045..00000000 --- a/launch/api_client/apis/tags/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.apis.tag_to_api import tag_to_api - -import enum - - -class TagValues(str, enum.Enum): - DEFAULT = "default" diff --git a/launch/api_client/apis/tags/default_api.py 
b/launch/api_client/apis/tags/default_api.py deleted file mode 100644 index 01235de7..00000000 --- a/launch/api_client/apis/tags/default_api.py +++ /dev/null @@ -1,273 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -from launch.api_client.paths.healthcheck.get import HealthcheckHealthcheckGet -from launch.api_client.paths.healthz.get import HealthcheckHealthzGet -from launch.api_client.paths.readyz.get import HealthcheckReadyzGet -from launch.api_client.paths.v1_async_tasks.post import ( - CreateAsyncInferenceTaskV1AsyncTasksPost, -) -from launch.api_client.paths.v1_async_tasks_task_id.get import ( - GetAsyncInferenceTaskV1AsyncTasksTaskIdGet, -) -from launch.api_client.paths.v1_batch_jobs.post import ( - CreateBatchJobV1BatchJobsPost, -) -from launch.api_client.paths.v1_batch_jobs_batch_job_id.get import ( - GetBatchJobV1BatchJobsBatchJobIdGet, -) -from launch.api_client.paths.v1_batch_jobs_batch_job_id.put import ( - UpdateBatchJobV1BatchJobsBatchJobIdPut, -) -from launch.api_client.paths.v1_docker_image_batch_job_bundles.get import ( - ListDockerImageBatchJobModelBundlesV1DockerImageBatchJobBundlesGet, -) -from launch.api_client.paths.v1_docker_image_batch_job_bundles.post import ( - CreateDockerImageBatchJobBundleV1DockerImageBatchJobBundlesPost, -) -from launch.api_client.paths.v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id.get import ( - GetDockerImageBatchJobModelBundleV1DockerImageBatchJobBundlesDockerImageBatchJobBundleIdGet, -) -from launch.api_client.paths.v1_docker_image_batch_job_bundles_latest.get import ( - GetLatestDockerImageBatchJobBundleV1DockerImageBatchJobBundlesLatestGet, -) -from launch.api_client.paths.v1_docker_image_batch_jobs.get import ( - ListDockerImageBatchJobsV1DockerImageBatchJobsGet, -) -from 
launch.api_client.paths.v1_docker_image_batch_jobs.post import ( - CreateDockerImageBatchJobV1DockerImageBatchJobsPost, -) -from launch.api_client.paths.v1_docker_image_batch_jobs_batch_job_id.get import ( - GetDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdGet, -) -from launch.api_client.paths.v1_docker_image_batch_jobs_batch_job_id.put import ( - UpdateDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdPut, -) -from launch.api_client.paths.v1_files.get import ListFilesV1FilesGet -from launch.api_client.paths.v1_files.post import UploadFileV1FilesPost -from launch.api_client.paths.v1_files_file_id.delete import ( - DeleteFileV1FilesFileIdDelete, -) -from launch.api_client.paths.v1_files_file_id.get import ( - GetFileV1FilesFileIdGet, -) -from launch.api_client.paths.v1_files_file_id_content.get import ( - GetFileContentV1FilesFileIdContentGet, -) -from launch.api_client.paths.v1_llm_batch_completions.post import ( - CreateBatchCompletionsV1LlmBatchCompletionsPost, -) -from launch.api_client.paths.v1_llm_completions_stream.post import ( - CreateCompletionStreamTaskV1LlmCompletionsStreamPost, -) -from launch.api_client.paths.v1_llm_completions_sync.post import ( - CreateCompletionSyncTaskV1LlmCompletionsSyncPost, -) -from launch.api_client.paths.v1_llm_fine_tunes.get import ( - ListFineTunesV1LlmFineTunesGet, -) -from launch.api_client.paths.v1_llm_fine_tunes.post import ( - CreateFineTuneV1LlmFineTunesPost, -) -from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id.get import ( - GetFineTuneV1LlmFineTunesFineTuneIdGet, -) -from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_cancel.put import ( - CancelFineTuneV1LlmFineTunesFineTuneIdCancelPut, -) -from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_events.get import ( - GetFineTuneEventsV1LlmFineTunesFineTuneIdEventsGet, -) -from launch.api_client.paths.v1_llm_model_endpoints.get import ( - ListModelEndpointsV1LlmModelEndpointsGet, -) -from 
launch.api_client.paths.v1_llm_model_endpoints.post import ( - CreateModelEndpointV1LlmModelEndpointsPost, -) -from launch.api_client.paths.v1_llm_model_endpoints_download.post import ( - DownloadModelEndpointV1LlmModelEndpointsDownloadPost, -) -from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name.delete import ( - DeleteLlmModelEndpointV1LlmModelEndpointsModelEndpointNameDelete, -) -from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name.get import ( - GetModelEndpointV1LlmModelEndpointsModelEndpointNameGet, -) -from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name.put import ( - UpdateModelEndpointV1LlmModelEndpointsModelEndpointNamePut, -) -from launch.api_client.paths.v1_model_bundles.get import ( - ListModelBundlesV1ModelBundlesGet, -) -from launch.api_client.paths.v1_model_bundles.post import ( - CreateModelBundleV1ModelBundlesPost, -) -from launch.api_client.paths.v1_model_bundles_clone_with_changes.post import ( - CloneModelBundleWithChangesV1ModelBundlesCloneWithChangesPost, -) -from launch.api_client.paths.v1_model_bundles_latest.get import ( - GetLatestModelBundleV1ModelBundlesLatestGet, -) -from launch.api_client.paths.v1_model_bundles_model_bundle_id.get import ( - GetModelBundleV1ModelBundlesModelBundleIdGet, -) -from launch.api_client.paths.v1_model_endpoints.get import ( - ListModelEndpointsV1ModelEndpointsGet, -) -from launch.api_client.paths.v1_model_endpoints.post import ( - CreateModelEndpointV1ModelEndpointsPost, -) -from launch.api_client.paths.v1_model_endpoints_api.get import ( - GetModelEndpointsApiV1ModelEndpointsApiGet, -) -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id.delete import ( - DeleteModelEndpointV1ModelEndpointsModelEndpointIdDelete, -) -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id.get import ( - GetModelEndpointV1ModelEndpointsModelEndpointIdGet, -) -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id.put import ( - 
UpdateModelEndpointV1ModelEndpointsModelEndpointIdPut, -) -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id_restart.post import ( - RestartModelEndpointV1ModelEndpointsModelEndpointIdRestartPost, -) -from launch.api_client.paths.v1_model_endpoints_schema_json.get import ( - GetModelEndpointsSchemaV1ModelEndpointsSchemaJsonGet, -) -from launch.api_client.paths.v1_streaming_tasks.post import ( - CreateStreamingInferenceTaskV1StreamingTasksPost, -) -from launch.api_client.paths.v1_sync_tasks.post import ( - CreateSyncInferenceTaskV1SyncTasksPost, -) -from launch.api_client.paths.v1_triggers.get import ListTriggersV1TriggersGet -from launch.api_client.paths.v1_triggers.post import ( - CreateTriggerV1TriggersPost, -) -from launch.api_client.paths.v1_triggers_trigger_id.delete import ( - DeleteTriggerV1TriggersTriggerIdDelete, -) -from launch.api_client.paths.v1_triggers_trigger_id.get import ( - GetTriggerV1TriggersTriggerIdGet, -) -from launch.api_client.paths.v1_triggers_trigger_id.put import ( - UpdateTriggerV1TriggersTriggerIdPut, -) -from launch.api_client.paths.v2_batch_completions.post import ( - BatchCompletionsV2BatchCompletionsPost, -) -from launch.api_client.paths.v2_batch_completions_batch_completion_id.get import ( - GetBatchCompletionV2BatchCompletionsBatchCompletionIdGet, -) -from launch.api_client.paths.v2_batch_completions_batch_completion_id.post import ( - UpdateBatchCompletionV2BatchCompletionsBatchCompletionIdPost, -) -from launch.api_client.paths.v2_batch_completions_batch_completion_id_actions_cancel.post import ( - CancelBatchCompletionV2BatchCompletionsBatchCompletionIdActionsCancelPost, -) -from launch.api_client.paths.v2_chat_completions.post import ( - ChatCompletionV2ChatCompletionsPost, -) -from launch.api_client.paths.v2_completions.post import ( - CompletionV2CompletionsPost, -) -from launch.api_client.paths.v2_model_bundles.get import ( - ListModelBundlesV2ModelBundlesGet, -) -from 
launch.api_client.paths.v2_model_bundles.post import ( - CreateModelBundleV2ModelBundlesPost, -) -from launch.api_client.paths.v2_model_bundles_clone_with_changes.post import ( - CloneModelBundleWithChangesV2ModelBundlesCloneWithChangesPost, -) -from launch.api_client.paths.v2_model_bundles_latest.get import ( - GetLatestModelBundleV2ModelBundlesLatestGet, -) -from launch.api_client.paths.v2_model_bundles_model_bundle_id.get import ( - GetModelBundleV2ModelBundlesModelBundleIdGet, -) - - -class DefaultApi( - BatchCompletionsV2BatchCompletionsPost, - CancelBatchCompletionV2BatchCompletionsBatchCompletionIdActionsCancelPost, - CancelFineTuneV1LlmFineTunesFineTuneIdCancelPut, - ChatCompletionV2ChatCompletionsPost, - CloneModelBundleWithChangesV1ModelBundlesCloneWithChangesPost, - CloneModelBundleWithChangesV2ModelBundlesCloneWithChangesPost, - CompletionV2CompletionsPost, - CreateAsyncInferenceTaskV1AsyncTasksPost, - CreateBatchCompletionsV1LlmBatchCompletionsPost, - CreateBatchJobV1BatchJobsPost, - CreateCompletionStreamTaskV1LlmCompletionsStreamPost, - CreateCompletionSyncTaskV1LlmCompletionsSyncPost, - CreateDockerImageBatchJobBundleV1DockerImageBatchJobBundlesPost, - CreateDockerImageBatchJobV1DockerImageBatchJobsPost, - CreateFineTuneV1LlmFineTunesPost, - CreateModelBundleV1ModelBundlesPost, - CreateModelBundleV2ModelBundlesPost, - CreateModelEndpointV1LlmModelEndpointsPost, - CreateModelEndpointV1ModelEndpointsPost, - CreateStreamingInferenceTaskV1StreamingTasksPost, - CreateSyncInferenceTaskV1SyncTasksPost, - CreateTriggerV1TriggersPost, - DeleteFileV1FilesFileIdDelete, - DeleteLlmModelEndpointV1LlmModelEndpointsModelEndpointNameDelete, - DeleteModelEndpointV1ModelEndpointsModelEndpointIdDelete, - DeleteTriggerV1TriggersTriggerIdDelete, - DownloadModelEndpointV1LlmModelEndpointsDownloadPost, - GetAsyncInferenceTaskV1AsyncTasksTaskIdGet, - GetBatchCompletionV2BatchCompletionsBatchCompletionIdGet, - GetBatchJobV1BatchJobsBatchJobIdGet, - 
GetDockerImageBatchJobModelBundleV1DockerImageBatchJobBundlesDockerImageBatchJobBundleIdGet, - GetDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdGet, - GetFileContentV1FilesFileIdContentGet, - GetFileV1FilesFileIdGet, - GetFineTuneEventsV1LlmFineTunesFineTuneIdEventsGet, - GetFineTuneV1LlmFineTunesFineTuneIdGet, - GetLatestDockerImageBatchJobBundleV1DockerImageBatchJobBundlesLatestGet, - GetLatestModelBundleV1ModelBundlesLatestGet, - GetLatestModelBundleV2ModelBundlesLatestGet, - GetModelBundleV1ModelBundlesModelBundleIdGet, - GetModelBundleV2ModelBundlesModelBundleIdGet, - GetModelEndpointV1LlmModelEndpointsModelEndpointNameGet, - GetModelEndpointV1ModelEndpointsModelEndpointIdGet, - GetModelEndpointsApiV1ModelEndpointsApiGet, - GetModelEndpointsSchemaV1ModelEndpointsSchemaJsonGet, - GetTriggerV1TriggersTriggerIdGet, - HealthcheckHealthcheckGet, - HealthcheckHealthzGet, - HealthcheckReadyzGet, - ListDockerImageBatchJobModelBundlesV1DockerImageBatchJobBundlesGet, - ListDockerImageBatchJobsV1DockerImageBatchJobsGet, - ListFilesV1FilesGet, - ListFineTunesV1LlmFineTunesGet, - ListModelBundlesV1ModelBundlesGet, - ListModelBundlesV2ModelBundlesGet, - ListModelEndpointsV1LlmModelEndpointsGet, - ListModelEndpointsV1ModelEndpointsGet, - ListTriggersV1TriggersGet, - RestartModelEndpointV1ModelEndpointsModelEndpointIdRestartPost, - UpdateBatchCompletionV2BatchCompletionsBatchCompletionIdPost, - UpdateBatchJobV1BatchJobsBatchJobIdPut, - UpdateDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdPut, - UpdateModelEndpointV1LlmModelEndpointsModelEndpointNamePut, - UpdateModelEndpointV1ModelEndpointsModelEndpointIdPut, - UpdateTriggerV1TriggersTriggerIdPut, - UploadFileV1FilesPost, -): - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - pass diff --git a/launch/api_client/configuration.py b/launch/api_client/configuration.py deleted file mode 100644 index bf5aed0d..00000000 --- a/launch/api_client/configuration.py +++ /dev/null @@ -1,466 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import copy -import logging -import multiprocessing -import sys -from http import client as http_client - -import urllib3 - -from launch.api_client.exceptions import ApiValueError - -JSON_SCHEMA_VALIDATION_KEYWORDS = { - 'multipleOf', 'maximum', 'exclusiveMaximum', - 'minimum', 'exclusiveMinimum', 'maxLength', - 'minLength', 'pattern', 'maxItems', 'minItems', - 'uniqueItems', 'maxProperties', 'minProperties', -} - -class Configuration(object): - """NOTE: This class is auto generated by OpenAPI Generator - - Ref: https://openapi-generator.tech - Do not edit the class manually. - - :param host: Base url - :param api_key: Dict to store API key(s). - Each entry in the dict specifies an API key. - The dict key is the name of the security scheme in the OAS specification. - The dict value is the API key secret. - :param api_key_prefix: Dict to store API prefix (e.g. Bearer) - The dict key is the name of the security scheme in the OAS specification. - The dict value is an API key prefix when generating the auth data. - :param username: Username for HTTP basic authentication - :param password: Password for HTTP basic authentication - :param discard_unknown_keys: Boolean value indicating whether to discard - unknown properties. A server may send a response that includes additional - properties that are not known by the client in the following scenarios: - 1. The OpenAPI document is incomplete, i.e. it does not match the server - implementation. - 2. 
The client was generated using an older version of the OpenAPI document - and the server has been upgraded since then. - If a schema in the OpenAPI document defines the additionalProperties attribute, - then all undeclared properties received by the server are injected into the - additional properties map. In that case, there are undeclared properties, and - nothing to discard. - :param disabled_client_side_validations (string): Comma-separated list of - JSON schema validation keywords to disable JSON schema structural validation - rules. The following keywords may be specified: multipleOf, maximum, - exclusiveMaximum, minimum, exclusiveMinimum, maxLength, minLength, pattern, - maxItems, minItems. - By default, the validation is performed for data generated locally by the client - and data received from the server, independent of any validation performed by - the server side. If the input data does not satisfy the JSON schema validation - rules specified in the OpenAPI document, an exception is raised. - If disabled_client_side_validations is set, structural validation is - disabled. This can be useful to troubleshoot data validation problem, such as - when the OpenAPI document validation rules do not match the actual API data - received by the server. - :param server_index: Index to servers configuration. - :param server_variables: Mapping with string values to replace variables in - templated server configuration. The validation of enums is performed for - variables with defined enum values before. - :param server_operation_index: Mapping from operation ID to an index to server - configuration. - :param server_operation_variables: Mapping from operation ID to a mapping with - string values to replace variables in templated server configuration. - The validation of enums is performed for variables with defined enum values before. - - :Example: - - HTTP Basic Authentication Example. 
- Given the following security scheme in the OpenAPI specification: - components: - securitySchemes: - http_basic_auth: - type: http - scheme: basic - - Configure API client with HTTP basic authentication: - -conf = launch.api_client.Configuration( - username='the-user', - password='the-password', -) - - """ - - _default = None - - def __init__( - self, - host=None, - username=None, - password=None, - discard_unknown_keys=False, - disabled_client_side_validations="", - server_index=None, - server_variables=None, - server_operation_index=None, - server_operation_variables=None, - access_token=None, - ): - """Constructor - """ - self._base_path = "http://localhost" if host is None else host - """Default Base url - """ - self.server_index = 0 if server_index is None and host is None else server_index - self.server_operation_index = server_operation_index or {} - """Default server index - """ - self.server_variables = server_variables or {} - self.server_operation_variables = server_operation_variables or {} - """Default server variables - """ - self.temp_folder_path = None - """Temp file folder for downloading files - """ - # Authentication Settings - self.username = username - """Username for HTTP basic authentication - """ - self.password = password - """Password for HTTP basic authentication - """ - self.discard_unknown_keys = discard_unknown_keys - self.disabled_client_side_validations = disabled_client_side_validations - self.access_token = None - """access token for OAuth/Bearer - """ - self.logger = {} - """Logging Settings - """ - self.logger["package_logger"] = logging.getLogger("launch.api_client") - self.logger["urllib3_logger"] = logging.getLogger("urllib3") - self.logger_format = '%(asctime)s %(levelname)s %(message)s' - """Log format - """ - self.logger_stream_handler = None - """Log stream handler - """ - self.logger_file_handler = None - """Log file handler - """ - self.logger_file = None - """Debug file location - """ - self.debug = False - """Debug 
switch - """ - - self.verify_ssl = True - """SSL/TLS verification - Set this to false to skip verifying SSL certificate when calling API - from https server. - """ - self.ssl_ca_cert = None - """Set this to customize the certificate file to verify the peer. - """ - self.cert_file = None - """client certificate file - """ - self.key_file = None - """client key file - """ - self.assert_hostname = None - """Set this to True/False to enable/disable SSL hostname verification. - """ - - self.connection_pool_maxsize = multiprocessing.cpu_count() * 5 - """urllib3 connection pool's maximum number of connections saved - per pool. urllib3 uses 1 connection as default value, but this is - not the best value when you are making a lot of possibly parallel - requests to the same host, which is often the case here. - cpu_count * 5 is used as default value to increase performance. - """ - - self.proxy = None - """Proxy URL - """ - self.proxy_headers = None - """Proxy headers - """ - self.safe_chars_for_path_param = '' - """Safe chars for path_param - """ - self.retries = None - """Adding retries to override urllib3 default value 3 - """ - # Enable client side validation - self.client_side_validation = True - - # Options to pass down to the underlying urllib3 socket - self.socket_options = None - - def __deepcopy__(self, memo): - cls = self.__class__ - result = cls.__new__(cls) - memo[id(self)] = result - for k, v in self.__dict__.items(): - if k not in ('logger', 'logger_file_handler'): - setattr(result, k, copy.deepcopy(v, memo)) - # shallow copy of loggers - result.logger = copy.copy(self.logger) - # use setters to configure loggers - result.logger_file = self.logger_file - result.debug = self.debug - return result - - def __setattr__(self, name, value): - object.__setattr__(self, name, value) - if name == 'disabled_client_side_validations': - s = set(filter(None, value.split(','))) - for v in s: - if v not in JSON_SCHEMA_VALIDATION_KEYWORDS: - raise ApiValueError( - "Invalid 
keyword: '{0}''".format(v)) - self._disabled_client_side_validations = s - - @classmethod - def set_default(cls, default): - """Set default instance of configuration. - - It stores default configuration, which can be - returned by get_default_copy method. - - :param default: object of Configuration - """ - cls._default = copy.deepcopy(default) - - @classmethod - def get_default_copy(cls): - """Return new instance of configuration. - - This method returns newly created, based on default constructor, - object of Configuration class or returns a copy of default - configuration passed by the set_default method. - - :return: The configuration object. - """ - if cls._default is not None: - return copy.deepcopy(cls._default) - return Configuration() - - @property - def logger_file(self): - """The logger file. - - If the logger_file is None, then add stream handler and remove file - handler. Otherwise, add file handler and remove stream handler. - - :param value: The logger_file path. - :type: str - """ - return self.__logger_file - - @logger_file.setter - def logger_file(self, value): - """The logger file. - - If the logger_file is None, then add stream handler and remove file - handler. Otherwise, add file handler and remove stream handler. - - :param value: The logger_file path. - :type: str - """ - self.__logger_file = value - if self.__logger_file: - # If set logging file, - # then add file handler and remove stream handler. - self.logger_file_handler = logging.FileHandler(self.__logger_file) - self.logger_file_handler.setFormatter(self.logger_formatter) - for _, logger in self.logger.items(): - logger.addHandler(self.logger_file_handler) - - @property - def debug(self): - """Debug status - - :param value: The debug status, True or False. - :type: bool - """ - return self.__debug - - @debug.setter - def debug(self, value): - """Debug status - - :param value: The debug status, True or False. 
- :type: bool - """ - self.__debug = value - if self.__debug: - # if debug status is True, turn on debug logging - for _, logger in self.logger.items(): - logger.setLevel(logging.DEBUG) - # turn on http_client debug - http_client.HTTPConnection.debuglevel = 1 - else: - # if debug status is False, turn off debug logging, - # setting log level to default `logging.WARNING` - for _, logger in self.logger.items(): - logger.setLevel(logging.WARNING) - # turn off http_client debug - http_client.HTTPConnection.debuglevel = 0 - - @property - def logger_format(self): - """The logger format. - - The logger_formatter will be updated when sets logger_format. - - :param value: The format string. - :type: str - """ - return self.__logger_format - - @logger_format.setter - def logger_format(self, value): - """The logger format. - - The logger_formatter will be updated when sets logger_format. - - :param value: The format string. - :type: str - """ - self.__logger_format = value - self.logger_formatter = logging.Formatter(self.__logger_format) - - def get_api_key_with_prefix(self, identifier, alias=None): - """Gets API key (with prefix if set). - - :param identifier: The identifier of apiKey. - :param alias: The alternative identifier of apiKey. - :return: The token for api key authentication. - """ - if self.refresh_api_key_hook is not None: - self.refresh_api_key_hook(self) - key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None) - if key: - prefix = self.api_key_prefix.get(identifier) - if prefix: - return "%s %s" % (prefix, key) - else: - return key - - def get_basic_auth_token(self): - """Gets HTTP basic authentication header (string). - - :return: The token for basic HTTP authentication. 
- """ - username = "" - if self.username is not None: - username = self.username - password = "" - if self.password is not None: - password = self.password - return urllib3.util.make_headers( - basic_auth=username + ':' + password - ).get('authorization') - - def auth_settings(self): - """Gets Auth Settings dict for api client. - - :return: The Auth Settings information dict. - """ - auth = {} - if self.username is not None and self.password is not None: - auth['HTTPBasic'] = { - 'type': 'basic', - 'in': 'header', - 'key': 'Authorization', - 'value': self.get_basic_auth_token() - } - if self.access_token is not None: - auth['OAuth2PasswordBearer'] = { - 'type': 'oauth2', - 'in': 'header', - 'key': 'Authorization', - 'value': 'Bearer ' + self.access_token - } - return auth - - def to_debug_report(self): - """Gets the essential information for debugging. - - :return: The report for debugging. - """ - return "Python SDK Debug Report:\n"\ - "OS: {env}\n"\ - "Python Version: {pyversion}\n"\ - "Version of the API: 1.0.0\n"\ - "SDK Package Version: 1.0.0".\ - format(env=sys.platform, pyversion=sys.version) - - def get_host_settings(self): - """Gets an array of host settings - - :return: An array of host settings - """ - return [ - { - 'url': "", - 'description': "No description provided", - } - ] - - def get_host_from_settings(self, index, variables=None, servers=None): - """Gets host URL based on the index and variables - :param index: array index of the host settings - :param variables: hash of variable and the corresponding value - :param servers: an array of host settings or None - :return: URL based on host settings - """ - if index is None: - return self._base_path - - variables = {} if variables is None else variables - servers = self.get_host_settings() if servers is None else servers - - try: - server = servers[index] - except IndexError: - raise ValueError( - "Invalid index {0} when selecting the host settings. 
" - "Must be less than {1}".format(index, len(servers))) - - url = server['url'] - - # go through variables and replace placeholders - for variable_name, variable in server.get('variables', {}).items(): - used_value = variables.get( - variable_name, variable['default_value']) - - if 'enum_values' in variable \ - and used_value not in variable['enum_values']: - raise ValueError( - "The variable `{0}` in the host URL has invalid value " - "{1}. Must be {2}.".format( - variable_name, variables[variable_name], - variable['enum_values'])) - - url = url.replace("{" + variable_name + "}", used_value) - - return url - - @property - def host(self): - """Return generated host.""" - return self.get_host_from_settings(self.server_index, variables=self.server_variables) - - @host.setter - def host(self, value): - """Fix base path.""" - self._base_path = value - self.server_index = None diff --git a/launch/api_client/exceptions.py b/launch/api_client/exceptions.py deleted file mode 100644 index 46687131..00000000 --- a/launch/api_client/exceptions.py +++ /dev/null @@ -1,147 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" -import dataclasses -import typing - -from urllib3._collections import HTTPHeaderDict - - -class OpenApiException(Exception): - """The base exception class for all OpenAPIExceptions""" - - -class ApiTypeError(OpenApiException, TypeError): - def __init__(self, msg, path_to_item=None, valid_classes=None, - key_type=None): - """ Raises an exception for TypeErrors - - Args: - msg (str): the exception message - - Keyword Args: - path_to_item (list): a list of keys an indices to get to the - current_item - None if unset - valid_classes (tuple): the primitive classes that current item - should be an instance of - None if unset - key_type (bool): False if 
our value is a value in a dict - True if it is a key in a dict - False if our item is an item in a list - None if unset - """ - self.path_to_item = path_to_item - self.valid_classes = valid_classes - self.key_type = key_type - full_msg = msg - if path_to_item: - full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) - super(ApiTypeError, self).__init__(full_msg) - - -class ApiValueError(OpenApiException, ValueError): - def __init__(self, msg, path_to_item=None): - """ - Args: - msg (str): the exception message - - Keyword Args: - path_to_item (list) the path to the exception in the - received_data dict. None if unset - """ - - self.path_to_item = path_to_item - full_msg = msg - if path_to_item: - full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) - super(ApiValueError, self).__init__(full_msg) - - -class ApiAttributeError(OpenApiException, AttributeError): - def __init__(self, msg, path_to_item=None): - """ - Raised when an attribute reference or assignment fails. - - Args: - msg (str): the exception message - - Keyword Args: - path_to_item (None/list) the path to the exception in the - received_data dict - """ - self.path_to_item = path_to_item - full_msg = msg - if path_to_item: - full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) - super(ApiAttributeError, self).__init__(full_msg) - - -class ApiKeyError(OpenApiException, KeyError): - def __init__(self, msg, path_to_item=None): - """ - Args: - msg (str): the exception message - - Keyword Args: - path_to_item (None/list) the path to the exception in the - received_data dict - """ - self.path_to_item = path_to_item - full_msg = msg - if path_to_item: - full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) - super(ApiKeyError, self).__init__(full_msg) - - -T = typing.TypeVar("T") - - -@dataclasses.dataclass -class ApiException(OpenApiException, typing.Generic[T]): - status: int - reason: str - api_response: typing.Optional[T] = None - - @property - def body(self) -> 
typing.Union[str, bytes, None]: - if not self.api_response: - return None - return self.api_response.response.data - - @property - def headers(self) -> typing.Optional[HTTPHeaderDict]: - if not self.api_response: - return None - return self.api_response.response.getheaders() - - def __str__(self): - """Custom error messages for exception""" - error_message = "({0})\n"\ - "Reason: {1}\n".format(self.status, self.reason) - if self.headers: - error_message += "HTTP response headers: {0}\n".format( - self.headers) - - if self.body: - error_message += "HTTP response body: {0}\n".format(self.body) - - return error_message - - -def render_path(path_to_item): - """Returns a string representation of a path""" - result = "" - for pth in path_to_item: - if isinstance(pth, int): - result += "[{0}]".format(pth) - else: - result += "['{0}']".format(pth) - return result diff --git a/launch/api_client/model/__init__.py b/launch/api_client/model/__init__.py deleted file mode 100644 index 4a66ef07..00000000 --- a/launch/api_client/model/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# we can not import model classes here because that would create a circular -# reference which would not work in python2 -# do not import all models into this module because that uses a lot of memory and stack frames -# if you need the ability to import all models from one package, import them with -# from launch.api_client.models import ModelA, ModelB diff --git a/launch/api_client/model/annotation.py b/launch/api_client/model/annotation.py deleted file mode 100644 index cd68a7ec..00000000 --- a/launch/api_client/model/annotation.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import 
re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Annotation( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "type", - "url_citation", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "url_citation": "URL_CITATION", - } - - @schemas.classproperty - def URL_CITATION(cls): - return cls("url_citation") - - @staticmethod - def url_citation() -> typing.Type['UrlCitation']: - return UrlCitation - __annotations__ = { - "type": type, - "url_citation": url_citation, - } - - type: MetaOapg.properties.type - url_citation: 'UrlCitation' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["url_citation"]) -> 'UrlCitation': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "url_citation", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["url_citation"]) -> 'UrlCitation': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "url_citation", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - type: typing.Union[MetaOapg.properties.type, str, ], - url_citation: 'UrlCitation', - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Annotation': - return super().__new__( - cls, - *_args, - type=type, - url_citation=url_citation, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.url_citation import UrlCitation diff --git a/launch/api_client/model/audio.py b/launch/api_client/model/audio.py deleted file mode 100644 index 1fc9d562..00000000 --- a/launch/api_client/model/audio.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Audio( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "id", - } - - class properties: - id = schemas.StrSchema - __annotations__ = { - "id": id, - } - - id: MetaOapg.properties.id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - id: typing.Union[MetaOapg.properties.id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Audio': - return super().__new__( - cls, - *_args, - id=id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/audio1.py b/launch/api_client/model/audio1.py deleted file mode 100644 index 8d5641c9..00000000 --- a/launch/api_client/model/audio1.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: 
F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Audio1( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "expires_at", - "transcript", - "data", - "id", - } - - class properties: - id = schemas.StrSchema - expires_at = schemas.IntSchema - data = schemas.StrSchema - transcript = schemas.StrSchema - __annotations__ = { - "id": id, - "expires_at": expires_at, - "data": data, - "transcript": transcript, - } - - expires_at: MetaOapg.properties.expires_at - transcript: MetaOapg.properties.transcript - data: MetaOapg.properties.data - id: MetaOapg.properties.id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["data"]) -> MetaOapg.properties.data: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["transcript"]) -> MetaOapg.properties.transcript: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "expires_at", "data", "transcript", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["data"]) -> MetaOapg.properties.data: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["transcript"]) -> MetaOapg.properties.transcript: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "expires_at", "data", "transcript", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - expires_at: typing.Union[MetaOapg.properties.expires_at, decimal.Decimal, int, ], - transcript: typing.Union[MetaOapg.properties.transcript, str, ], - data: typing.Union[MetaOapg.properties.data, str, ], - id: typing.Union[MetaOapg.properties.id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Audio1': - return super().__new__( - cls, - *_args, - expires_at=expires_at, - transcript=transcript, - data=data, - id=id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/audio2.py b/launch/api_client/model/audio2.py deleted file mode 100644 index c9552fcf..00000000 --- a/launch/api_client/model/audio2.py +++ /dev/null @@ -1,140 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 
-import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Audio2( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "voice", - "format", - } - - class properties: - - @staticmethod - def voice() -> typing.Type['VoiceIdsShared']: - return VoiceIdsShared - - - class format( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "wav": "WAV", - "aac": "AAC", - "mp3": "MP3", - "flac": "FLAC", - "opus": "OPUS", - "pcm16": "PCM16", - } - - @schemas.classproperty - def WAV(cls): - return cls("wav") - - @schemas.classproperty - def AAC(cls): - return cls("aac") - - @schemas.classproperty - def MP3(cls): - return cls("mp3") - - @schemas.classproperty - def FLAC(cls): - return cls("flac") - - @schemas.classproperty - def OPUS(cls): - return cls("opus") - - @schemas.classproperty - def PCM16(cls): - return cls("pcm16") - __annotations__ = { - "voice": voice, - "format": format, - } - - voice: 'VoiceIdsShared' - format: MetaOapg.properties.format - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["voice"]) -> 'VoiceIdsShared': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["format"]) -> MetaOapg.properties.format: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["voice", "format", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["voice"]) -> 'VoiceIdsShared': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["format"]) -> MetaOapg.properties.format: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["voice", "format", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - voice: 'VoiceIdsShared', - format: typing.Union[MetaOapg.properties.format, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Audio2': - return super().__new__( - cls, - *_args, - voice=voice, - format=format, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.voice_ids_shared import VoiceIdsShared diff --git a/launch/api_client/model/batch_completions_job.py b/launch/api_client/model/batch_completions_job.py deleted file mode 100644 index 6423df74..00000000 --- a/launch/api_client/model/batch_completions_job.py +++ /dev/null @@ -1,289 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class BatchCompletionsJob( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "completed_at", - "metadata", - "expires_at", - "model_config", - "job_id", - "created_at", - "output_data_path", - "status", - } - - class properties: - job_id = schemas.StrSchema - output_data_path = schemas.StrSchema - - @staticmethod - def model_config() -> typing.Type['BatchCompletionsModelConfig']: - return BatchCompletionsModelConfig - - @staticmethod - def status() -> typing.Type['BatchCompletionsJobStatus']: - return BatchCompletionsJobStatus - created_at = schemas.StrSchema - expires_at = schemas.StrSchema - - - class completed_at( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'completed_at': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class input_data_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'input_data_path': - return super().__new__( - 
cls, - *_args, - _configuration=_configuration, - ) - - - class priority( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "job_id": job_id, - "output_data_path": output_data_path, - "model_config": model_config, - "status": status, - "created_at": created_at, - "expires_at": expires_at, - "completed_at": completed_at, - "metadata": metadata, - "input_data_path": input_data_path, - "priority": priority, - } - - completed_at: MetaOapg.properties.completed_at - metadata: MetaOapg.properties.metadata - expires_at: MetaOapg.properties.expires_at - model_config: 'BatchCompletionsModelConfig' - job_id: MetaOapg.properties.job_id - created_at: MetaOapg.properties.created_at - output_data_path: MetaOapg.properties.output_data_path - status: 'BatchCompletionsJobStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchCompletionsJobStatus': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["input_data_path"]) -> MetaOapg.properties.input_data_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", "output_data_path", "model_config", "status", "created_at", "expires_at", "completed_at", "metadata", "input_data_path", "priority", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchCompletionsJobStatus': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["input_data_path"]) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", "output_data_path", "model_config", "status", "created_at", "expires_at", "completed_at", "metadata", "input_data_path", "priority", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - completed_at: typing.Union[MetaOapg.properties.completed_at, None, str, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, ], - expires_at: typing.Union[MetaOapg.properties.expires_at, str, ], - model_config: 'BatchCompletionsModelConfig', - job_id: typing.Union[MetaOapg.properties.job_id, str, ], - created_at: typing.Union[MetaOapg.properties.created_at, str, ], - output_data_path: typing.Union[MetaOapg.properties.output_data_path, str, ], - status: 'BatchCompletionsJobStatus', - input_data_path: typing.Union[MetaOapg.properties.input_data_path, None, str, schemas.Unset] = schemas.unset, - priority: typing.Union[MetaOapg.properties.priority, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'BatchCompletionsJob': - return super().__new__( - cls, - *_args, - completed_at=completed_at, - metadata=metadata, - expires_at=expires_at, - model_config=model_config, - job_id=job_id, - created_at=created_at, - 
output_data_path=output_data_path, - status=status, - input_data_path=input_data_path, - priority=priority, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.batch_completions_job_status import ( - BatchCompletionsJobStatus, -) -from launch.api_client.model.batch_completions_model_config import ( - BatchCompletionsModelConfig, -) diff --git a/launch/api_client/model/batch_completions_job_status.py b/launch/api_client/model/batch_completions_job_status.py deleted file mode 100644 index a8cc49e2..00000000 --- a/launch/api_client/model/batch_completions_job_status.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class BatchCompletionsJobStatus( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - enum_value_to_name = { - "queued": "QUEUED", - "running": "RUNNING", - "completed": "COMPLETED", - "failed": "FAILED", - "cancelled": "CANCELLED", - "unknown": "UNKNOWN", - } - - @schemas.classproperty - def QUEUED(cls): - return cls("queued") - - @schemas.classproperty - def RUNNING(cls): - return cls("running") - - @schemas.classproperty - def COMPLETED(cls): - return cls("completed") - - @schemas.classproperty - def FAILED(cls): - return cls("failed") - - @schemas.classproperty - def CANCELLED(cls): - return cls("cancelled") - - @schemas.classproperty - def UNKNOWN(cls): - return cls("unknown") diff --git a/launch/api_client/model/batch_completions_model_config.py b/launch/api_client/model/batch_completions_model_config.py deleted file mode 100644 index 317ee9e8..00000000 --- a/launch/api_client/model/batch_completions_model_config.py +++ /dev/null @@ -1,1290 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class BatchCompletionsModelConfig( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "model", - } - - class properties: - model = schemas.StrSchema - - - class max_model_len( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_model_len': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_num_seqs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_num_seqs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enforce_eager( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enforce_eager': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class trust_remote_code( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'trust_remote_code': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class pipeline_parallel_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'pipeline_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tensor_parallel_size( - schemas.IntBase, - schemas.NoneBase, - 
schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tensor_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_log_requests( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_log_requests': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tool_call_parser( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tool_call_parser': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_auto_tool_choice( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_auto_tool_choice': - return 
super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class load_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'load_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class config_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'config_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokenizer_mode( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer_mode': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class limit_mm_per_prompt( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'limit_mm_per_prompt': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_num_batched_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_num_batched_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokenizer( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: 
typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class seed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'seed': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class code_revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'code_revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class rope_scaling( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return 
super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'rope_scaling': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class tokenizer_revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer_revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization_param_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization_param_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_seq_len_to_capture( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_seq_len_to_capture': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_sliding_window( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_sliding_window': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class 
skip_tokenizer_init( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_tokenizer_init': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class served_model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'served_model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class override_neuron_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'override_neuron_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class mm_processor_kwargs( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return 
super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'mm_processor_kwargs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class block_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'block_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class gpu_memory_utilization( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpu_memory_utilization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class swap_space( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'swap_space': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cache_dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) 
-> 'cache_dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_gpu_blocks_override( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_gpu_blocks_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_prefix_caching( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_prefix_caching': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_shards( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_shards': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_context_length( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_context_length': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class response_role( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'response_role': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "model": model, - "max_model_len": max_model_len, - "max_num_seqs": max_num_seqs, - "enforce_eager": enforce_eager, - "trust_remote_code": trust_remote_code, - "pipeline_parallel_size": pipeline_parallel_size, - "tensor_parallel_size": tensor_parallel_size, - "quantization": quantization, - "disable_log_requests": disable_log_requests, - "chat_template": chat_template, - "tool_call_parser": tool_call_parser, - "enable_auto_tool_choice": enable_auto_tool_choice, - "load_format": load_format, - "config_format": config_format, - "tokenizer_mode": tokenizer_mode, - "limit_mm_per_prompt": limit_mm_per_prompt, - "max_num_batched_tokens": max_num_batched_tokens, - "tokenizer": tokenizer, - "dtype": dtype, - "seed": seed, - "revision": revision, - "code_revision": code_revision, - "rope_scaling": rope_scaling, - "tokenizer_revision": tokenizer_revision, - "quantization_param_path": quantization_param_path, - "max_seq_len_to_capture": max_seq_len_to_capture, - "disable_sliding_window": disable_sliding_window, - "skip_tokenizer_init": skip_tokenizer_init, - "served_model_name": served_model_name, - "override_neuron_config": override_neuron_config, - "mm_processor_kwargs": mm_processor_kwargs, - "block_size": block_size, - "gpu_memory_utilization": gpu_memory_utilization, - "swap_space": swap_space, - "cache_dtype": cache_dtype, - "num_gpu_blocks_override": num_gpu_blocks_override, - "enable_prefix_caching": enable_prefix_caching, - "checkpoint_path": checkpoint_path, - "num_shards": num_shards, - "max_context_length": max_context_length, - "response_role": response_role, - } - - 
model: MetaOapg.properties.model - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_model_len"]) -> MetaOapg.properties.max_model_len: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_num_seqs"]) -> MetaOapg.properties.max_num_seqs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enforce_eager"]) -> MetaOapg.properties.enforce_eager: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> MetaOapg.properties.pipeline_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> MetaOapg.properties.tensor_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_log_requests"]) -> MetaOapg.properties.disable_log_requests: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> MetaOapg.properties.enable_auto_tool_choice: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["config_format"]) -> MetaOapg.properties.config_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer_mode"]) -> MetaOapg.properties.tokenizer_mode: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> MetaOapg.properties.limit_mm_per_prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> MetaOapg.properties.max_num_batched_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer"]) -> MetaOapg.properties.tokenizer: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["code_revision"]) -> MetaOapg.properties.code_revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["rope_scaling"]) -> MetaOapg.properties.rope_scaling: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer_revision"]) -> MetaOapg.properties.tokenizer_revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> MetaOapg.properties.max_seq_len_to_capture: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_sliding_window"]) -> MetaOapg.properties.disable_sliding_window: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["override_neuron_config"]) -> MetaOapg.properties.override_neuron_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> MetaOapg.properties.mm_processor_kwargs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["block_size"]) -> MetaOapg.properties.block_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> MetaOapg.properties.gpu_memory_utilization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["swap_space"]) -> MetaOapg.properties.swap_space: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cache_dtype"]) -> MetaOapg.properties.cache_dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> MetaOapg.properties.num_gpu_blocks_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> MetaOapg.properties.enable_prefix_caching: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_context_length"]) -> MetaOapg.properties.max_context_length: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["response_role"]) -> MetaOapg.properties.response_role: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "checkpoint_path", "num_shards", "max_context_length", "response_role", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_model_len"]) -> typing.Union[MetaOapg.properties.max_model_len, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_num_seqs"]) -> typing.Union[MetaOapg.properties.max_num_seqs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enforce_eager"]) -> typing.Union[MetaOapg.properties.enforce_eager, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> typing.Union[MetaOapg.properties.pipeline_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> typing.Union[MetaOapg.properties.tensor_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_log_requests"]) -> typing.Union[MetaOapg.properties.disable_log_requests, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> typing.Union[MetaOapg.properties.enable_auto_tool_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["config_format"]) -> typing.Union[MetaOapg.properties.config_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_mode"]) -> typing.Union[MetaOapg.properties.tokenizer_mode, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> typing.Union[MetaOapg.properties.limit_mm_per_prompt, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> typing.Union[MetaOapg.properties.max_num_batched_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer"]) -> typing.Union[MetaOapg.properties.tokenizer, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["code_revision"]) -> typing.Union[MetaOapg.properties.code_revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["rope_scaling"]) -> typing.Union[MetaOapg.properties.rope_scaling, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_revision"]) -> typing.Union[MetaOapg.properties.tokenizer_revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> typing.Union[MetaOapg.properties.max_seq_len_to_capture, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_sliding_window"]) -> typing.Union[MetaOapg.properties.disable_sliding_window, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["override_neuron_config"]) -> typing.Union[MetaOapg.properties.override_neuron_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> typing.Union[MetaOapg.properties.mm_processor_kwargs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["block_size"]) -> typing.Union[MetaOapg.properties.block_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.gpu_memory_utilization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["swap_space"]) -> typing.Union[MetaOapg.properties.swap_space, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cache_dtype"]) -> typing.Union[MetaOapg.properties.cache_dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> typing.Union[MetaOapg.properties.num_gpu_blocks_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> typing.Union[MetaOapg.properties.enable_prefix_caching, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_context_length"]) -> typing.Union[MetaOapg.properties.max_context_length, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["response_role"]) -> typing.Union[MetaOapg.properties.response_role, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "checkpoint_path", "num_shards", "max_context_length", "response_role", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model: typing.Union[MetaOapg.properties.model, str, ], - max_model_len: typing.Union[MetaOapg.properties.max_model_len, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_num_seqs: typing.Union[MetaOapg.properties.max_num_seqs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enforce_eager: typing.Union[MetaOapg.properties.enforce_eager, 
None, bool, schemas.Unset] = schemas.unset, - trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, - pipeline_parallel_size: typing.Union[MetaOapg.properties.pipeline_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - tensor_parallel_size: typing.Union[MetaOapg.properties.tensor_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, - disable_log_requests: typing.Union[MetaOapg.properties.disable_log_requests, None, bool, schemas.Unset] = schemas.unset, - chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, - tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, - enable_auto_tool_choice: typing.Union[MetaOapg.properties.enable_auto_tool_choice, None, bool, schemas.Unset] = schemas.unset, - load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, - config_format: typing.Union[MetaOapg.properties.config_format, None, str, schemas.Unset] = schemas.unset, - tokenizer_mode: typing.Union[MetaOapg.properties.tokenizer_mode, None, str, schemas.Unset] = schemas.unset, - limit_mm_per_prompt: typing.Union[MetaOapg.properties.limit_mm_per_prompt, None, str, schemas.Unset] = schemas.unset, - max_num_batched_tokens: typing.Union[MetaOapg.properties.max_num_batched_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - tokenizer: typing.Union[MetaOapg.properties.tokenizer, None, str, schemas.Unset] = schemas.unset, - dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = 
schemas.unset, - code_revision: typing.Union[MetaOapg.properties.code_revision, None, str, schemas.Unset] = schemas.unset, - rope_scaling: typing.Union[MetaOapg.properties.rope_scaling, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - tokenizer_revision: typing.Union[MetaOapg.properties.tokenizer_revision, None, str, schemas.Unset] = schemas.unset, - quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, - max_seq_len_to_capture: typing.Union[MetaOapg.properties.max_seq_len_to_capture, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - disable_sliding_window: typing.Union[MetaOapg.properties.disable_sliding_window, None, bool, schemas.Unset] = schemas.unset, - skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, - served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, - override_neuron_config: typing.Union[MetaOapg.properties.override_neuron_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - mm_processor_kwargs: typing.Union[MetaOapg.properties.mm_processor_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - block_size: typing.Union[MetaOapg.properties.block_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - gpu_memory_utilization: typing.Union[MetaOapg.properties.gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - swap_space: typing.Union[MetaOapg.properties.swap_space, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - cache_dtype: typing.Union[MetaOapg.properties.cache_dtype, None, str, schemas.Unset] = schemas.unset, - num_gpu_blocks_override: typing.Union[MetaOapg.properties.num_gpu_blocks_override, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enable_prefix_caching: 
typing.Union[MetaOapg.properties.enable_prefix_caching, None, bool, schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_context_length: typing.Union[MetaOapg.properties.max_context_length, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - response_role: typing.Union[MetaOapg.properties.response_role, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'BatchCompletionsModelConfig': - return super().__new__( - cls, - *_args, - model=model, - max_model_len=max_model_len, - max_num_seqs=max_num_seqs, - enforce_eager=enforce_eager, - trust_remote_code=trust_remote_code, - pipeline_parallel_size=pipeline_parallel_size, - tensor_parallel_size=tensor_parallel_size, - quantization=quantization, - disable_log_requests=disable_log_requests, - chat_template=chat_template, - tool_call_parser=tool_call_parser, - enable_auto_tool_choice=enable_auto_tool_choice, - load_format=load_format, - config_format=config_format, - tokenizer_mode=tokenizer_mode, - limit_mm_per_prompt=limit_mm_per_prompt, - max_num_batched_tokens=max_num_batched_tokens, - tokenizer=tokenizer, - dtype=dtype, - seed=seed, - revision=revision, - code_revision=code_revision, - rope_scaling=rope_scaling, - tokenizer_revision=tokenizer_revision, - quantization_param_path=quantization_param_path, - max_seq_len_to_capture=max_seq_len_to_capture, - disable_sliding_window=disable_sliding_window, - skip_tokenizer_init=skip_tokenizer_init, - served_model_name=served_model_name, - override_neuron_config=override_neuron_config, - mm_processor_kwargs=mm_processor_kwargs, - 
block_size=block_size, - gpu_memory_utilization=gpu_memory_utilization, - swap_space=swap_space, - cache_dtype=cache_dtype, - num_gpu_blocks_override=num_gpu_blocks_override, - enable_prefix_caching=enable_prefix_caching, - checkpoint_path=checkpoint_path, - num_shards=num_shards, - max_context_length=max_context_length, - response_role=response_role, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/batch_job_serialization_format.py b/launch/api_client/model/batch_job_serialization_format.py deleted file mode 100644 index 7a95ec76..00000000 --- a/launch/api_client/model/batch_job_serialization_format.py +++ /dev/null @@ -1,49 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class BatchJobSerializationFormat( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - enum_value_to_name = { - "JSON": "JSON", - "PICKLE": "PICKLE", - } - - @schemas.classproperty - def JSON(cls): - return cls("JSON") - - @schemas.classproperty - def PICKLE(cls): - return cls("PICKLE") diff --git a/launch/api_client/model/batch_job_status.py b/launch/api_client/model/batch_job_status.py deleted file mode 100644 index 01a30c4b..00000000 --- a/launch/api_client/model/batch_job_status.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class BatchJobStatus( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - enum_value_to_name = { - "PENDING": "PENDING", - "RUNNING": "RUNNING", - "SUCCESS": "SUCCESS", - "FAILURE": "FAILURE", - "CANCELLED": "CANCELLED", - "UNDEFINED": "UNDEFINED", - "TIMEOUT": "TIMEOUT", - } - - @schemas.classproperty - def PENDING(cls): - return cls("PENDING") - - @schemas.classproperty - def RUNNING(cls): - return cls("RUNNING") - - @schemas.classproperty - def SUCCESS(cls): - return cls("SUCCESS") - - @schemas.classproperty - def FAILURE(cls): - return cls("FAILURE") - - @schemas.classproperty - def CANCELLED(cls): - return cls("CANCELLED") - - @schemas.classproperty - def UNDEFINED(cls): - return cls("UNDEFINED") - - @schemas.classproperty - def TIMEOUT(cls): - return cls("TIMEOUT") diff --git a/launch/api_client/model/body_upload_file_v1_files_post.py b/launch/api_client/model/body_upload_file_v1_files_post.py deleted file mode 100644 index d0359487..00000000 --- a/launch/api_client/model/body_upload_file_v1_files_post.py +++ /dev/null @@ -1,201 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class BodyUploadFileV1FilesPost( - schemas.AnyTypeSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "file", - } - - class properties: - class file( - schemas.BinaryBase, - schemas.AnyTypeSchema, - ): - class MetaOapg: - format = "binary" - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "file": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - __annotations__ = { - "file": file, - } - - file: MetaOapg.properties.file - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["file"]) -> MetaOapg.properties.file: - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "file", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["file"]) -> MetaOapg.properties.file: - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "file", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - file: typing.Union[ - MetaOapg.properties.file, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "BodyUploadFileV1FilesPost": - return super().__new__( - cls, - *_args, - file=file, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/callback_auth.py b/launch/api_client/model/callback_auth.py deleted file mode 100644 index 558e75d3..00000000 --- a/launch/api_client/model/callback_auth.py +++ /dev/null @@ -1,79 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CallbackAuth( - schemas.ComposedSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. 
- Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - @staticmethod - def discriminator(): - return { - 'kind': { - 'CallbackBasicAuth': CallbackBasicAuth, - 'CallbackmTLSAuth': CallbackmTLSAuth, - 'basic': CallbackBasicAuth, - 'mtls': CallbackmTLSAuth, - } - } - - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CallbackBasicAuth, - CallbackmTLSAuth, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CallbackAuth': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_basic_auth import CallbackBasicAuth -from launch.api_client.model.callbackm_tls_auth import CallbackmTLSAuth diff --git a/launch/api_client/model/callback_basic_auth.py b/launch/api_client/model/callback_basic_auth.py deleted file mode 100644 index eec2406c..00000000 --- a/launch/api_client/model/callback_basic_auth.py +++ /dev/null @@ -1,122 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: 
https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CallbackBasicAuth( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "password", - "kind", - "username", - } - - class properties: - - - class kind( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "basic": "BASIC", - } - - @schemas.classproperty - def BASIC(cls): - return cls("basic") - username = schemas.StrSchema - password = schemas.StrSchema - __annotations__ = { - "kind": kind, - "username": username, - "password": password, - } - - password: MetaOapg.properties.password - kind: MetaOapg.properties.kind - username: MetaOapg.properties.username - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["username"]) -> MetaOapg.properties.username: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["password"]) -> MetaOapg.properties.password: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["kind", "username", "password", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["username"]) -> MetaOapg.properties.username: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["password"]) -> MetaOapg.properties.password: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["kind", "username", "password", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - password: typing.Union[MetaOapg.properties.password, str, ], - kind: typing.Union[MetaOapg.properties.kind, str, ], - username: typing.Union[MetaOapg.properties.username, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CallbackBasicAuth': - return super().__new__( - cls, - *_args, - password=password, - kind=kind, - username=username, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/callbackm_tls_auth.py b/launch/api_client/model/callbackm_tls_auth.py deleted file mode 100644 index 2c1300cd..00000000 --- a/launch/api_client/model/callbackm_tls_auth.py +++ /dev/null @@ -1,122 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from 
launch.api_client import schemas # noqa: F401 - - -class CallbackmTLSAuth( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "kind", - "cert", - "key", - } - - class properties: - - - class kind( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "mtls": "MTLS", - } - - @schemas.classproperty - def MTLS(cls): - return cls("mtls") - cert = schemas.StrSchema - key = schemas.StrSchema - __annotations__ = { - "kind": kind, - "cert": cert, - "key": key, - } - - kind: MetaOapg.properties.kind - cert: MetaOapg.properties.cert - key: MetaOapg.properties.key - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cert"]) -> MetaOapg.properties.cert: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["kind", "cert", "key", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cert"]) -> MetaOapg.properties.cert: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["kind", "cert", "key", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - kind: typing.Union[MetaOapg.properties.kind, str, ], - cert: typing.Union[MetaOapg.properties.cert, str, ], - key: typing.Union[MetaOapg.properties.key, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CallbackmTLSAuth': - return super().__new__( - cls, - *_args, - kind=kind, - cert=cert, - key=key, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/cancel_batch_completions_v2_response.py b/launch/api_client/model/cancel_batch_completions_v2_response.py deleted file mode 100644 index 9f1f67ea..00000000 --- a/launch/api_client/model/cancel_batch_completions_v2_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CancelBatchCompletionsV2Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - success: typing.Union[MetaOapg.properties.success, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CancelBatchCompletionsV2Response': - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/cancel_fine_tune_job_response.py b/launch/api_client/model/cancel_fine_tune_job_response.py deleted file mode 100644 index f83a8251..00000000 --- a/launch/api_client/model/cancel_fine_tune_job_response.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: 
https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CancelFineTuneResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "success", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "success", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CancelFineTuneResponse": - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/cancel_fine_tune_response.py b/launch/api_client/model/cancel_fine_tune_response.py deleted file mode 100644 index 08f8438a..00000000 --- a/launch/api_client/model/cancel_fine_tune_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CancelFineTuneResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - success: typing.Union[MetaOapg.properties.success, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CancelFineTuneResponse': - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/chat_completion_function_call_option.py b/launch/api_client/model/chat_completion_function_call_option.py deleted file mode 100644 index 5e7af1b0..00000000 --- a/launch/api_client/model/chat_completion_function_call_option.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: 
https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionFunctionCallOption( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "name", - } - - class properties: - name = schemas.StrSchema - __annotations__ = { - "name": name, - } - - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - name: typing.Union[MetaOapg.properties.name, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionFunctionCallOption': - return super().__new__( - cls, - *_args, - name=name, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/chat_completion_functions.py b/launch/api_client/model/chat_completion_functions.py deleted file mode 100644 index 9135c5bd..00000000 --- a/launch/api_client/model/chat_completion_functions.py +++ /dev/null @@ -1,127 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionFunctions( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "name", - } - - class properties: - name = schemas.StrSchema - - - class description( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'description': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def parameters() -> typing.Type['FunctionParameters']: - return FunctionParameters - __annotations__ = { - "name": name, - "description": description, - "parameters": parameters, - } - - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["parameters"]) -> 'FunctionParameters': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "description", "parameters", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["parameters"]) -> typing.Union['FunctionParameters', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "description", "parameters", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - name: typing.Union[MetaOapg.properties.name, str, ], - description: typing.Union[MetaOapg.properties.description, None, str, schemas.Unset] = schemas.unset, - parameters: typing.Union['FunctionParameters', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionFunctions': - return super().__new__( - cls, - *_args, - name=name, - description=description, - parameters=parameters, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.function_parameters import FunctionParameters diff --git a/launch/api_client/model/chat_completion_message_tool_call.py b/launch/api_client/model/chat_completion_message_tool_call.py deleted file mode 100644 index 01ca895a..00000000 --- a/launch/api_client/model/chat_completion_message_tool_call.py +++ /dev/null @@ -1,127 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionMessageToolCall( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. 
- Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "function", - "id", - "type", - } - - class properties: - id = schemas.StrSchema - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "function": "FUNCTION", - } - - @schemas.classproperty - def FUNCTION(cls): - return cls("function") - - @staticmethod - def function() -> typing.Type['Function1']: - return Function1 - __annotations__ = { - "id": id, - "type": type, - "function": function, - } - - function: 'Function1' - id: MetaOapg.properties.id - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["function"]) -> 'Function1': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "type", "function", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["function"]) -> 'Function1': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "type", "function", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - function: 'Function1', - id: typing.Union[MetaOapg.properties.id, str, ], - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionMessageToolCall': - return super().__new__( - cls, - *_args, - function=function, - id=id, - type=type, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.function1 import Function1 diff --git a/launch/api_client/model/chat_completion_message_tool_call_chunk.py b/launch/api_client/model/chat_completion_message_tool_call_chunk.py deleted file mode 100644 index 86b0955d..00000000 --- a/launch/api_client/model/chat_completion_message_tool_call_chunk.py +++ /dev/null @@ -1,167 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionMessageToolCallChunk( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "index", - } - - class properties: - index = schemas.IntSchema - - - class id( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'id': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class type( - schemas.EnumBase, - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - class MetaOapg: - enum_value_to_name = { - "function": "FUNCTION", - } - - @schemas.classproperty - def FUNCTION(cls): - return cls("function") - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'type': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def function() -> typing.Type['Function2']: - return Function2 - __annotations__ = { - "index": index, - "id": id, - "type": type, - "function": function, - } - - index: MetaOapg.properties.index - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["function"]) -> 'Function2': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["index", "id", "type", "function", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> typing.Union[MetaOapg.properties.id, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> typing.Union[MetaOapg.properties.type, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["function"]) -> typing.Union['Function2', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["index", "id", "type", "function", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - index: typing.Union[MetaOapg.properties.index, decimal.Decimal, int, ], - id: typing.Union[MetaOapg.properties.id, None, str, schemas.Unset] = schemas.unset, - type: typing.Union[MetaOapg.properties.type, None, str, schemas.Unset] = schemas.unset, - function: typing.Union['Function2', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionMessageToolCallChunk': - return super().__new__( - cls, - *_args, - index=index, - id=id, - type=type, - function=function, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.function2 import Function2 diff --git a/launch/api_client/model/chat_completion_message_tool_calls_input.py 
b/launch/api_client/model/chat_completion_message_tool_calls_input.py deleted file mode 100644 index 05b2219f..00000000 --- a/launch/api_client/model/chat_completion_message_tool_calls_input.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionMessageToolCallsInput( - schemas.ListSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - The tool calls generated by the model, such as function calls. 
- """ - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionMessageToolCall']: - return ChatCompletionMessageToolCall - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ChatCompletionMessageToolCall'], typing.List['ChatCompletionMessageToolCall']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ChatCompletionMessageToolCallsInput': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ChatCompletionMessageToolCall': - return super().__getitem__(i) - -from launch.api_client.model.chat_completion_message_tool_call import ( - ChatCompletionMessageToolCall, -) diff --git a/launch/api_client/model/chat_completion_message_tool_calls_output.py b/launch/api_client/model/chat_completion_message_tool_calls_output.py deleted file mode 100644 index be835da5..00000000 --- a/launch/api_client/model/chat_completion_message_tool_calls_output.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionMessageToolCallsOutput( - schemas.ListSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - The tool calls generated by the model, such as function calls. 
- """ - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionMessageToolCall']: - return ChatCompletionMessageToolCall - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ChatCompletionMessageToolCall'], typing.List['ChatCompletionMessageToolCall']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ChatCompletionMessageToolCallsOutput': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ChatCompletionMessageToolCall': - return super().__getitem__(i) - -from launch.api_client.model.chat_completion_message_tool_call import ( - ChatCompletionMessageToolCall, -) diff --git a/launch/api_client/model/chat_completion_named_tool_choice.py b/launch/api_client/model/chat_completion_named_tool_choice.py deleted file mode 100644 index f2e531bc..00000000 --- a/launch/api_client/model/chat_completion_named_tool_choice.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionNamedToolChoice( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "function", - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "function": "FUNCTION", - } - - @schemas.classproperty - def FUNCTION(cls): - return cls("function") - - @staticmethod - def function() -> typing.Type['Function3']: - return Function3 - __annotations__ = { - "type": type, - "function": function, - } - - function: 'Function3' - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["function"]) -> 'Function3': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "function", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["function"]) -> 'Function3': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "function", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - function: 'Function3', - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionNamedToolChoice': - return super().__new__( - cls, - *_args, - function=function, - type=type, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.function3 import Function3 diff --git a/launch/api_client/model/chat_completion_request_assistant_message.py b/launch/api_client/model/chat_completion_request_assistant_message.py deleted file mode 100644 index d3b879b3..00000000 --- a/launch/api_client/model/chat_completion_request_assistant_message.py +++ /dev/null @@ -1,249 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestAssistantMessage( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "role", - } - - class properties: - - - class role( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "assistant": "ASSISTANT", - } - - @schemas.classproperty - def ASSISTANT(cls): - return cls("assistant") - - - class content( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - Content, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class refusal( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'refusal': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = 
None, - ) -> 'name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def audio() -> typing.Type['Audio']: - return Audio - - @staticmethod - def tool_calls() -> typing.Type['ChatCompletionMessageToolCallsInput']: - return ChatCompletionMessageToolCallsInput - - @staticmethod - def function_call() -> typing.Type['FunctionCall']: - return FunctionCall - __annotations__ = { - "role": role, - "content": content, - "refusal": refusal, - "name": name, - "audio": audio, - "tool_calls": tool_calls, - "function_call": function_call, - } - - role: MetaOapg.properties.role - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["audio"]) -> 'Audio': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_calls"]) -> 'ChatCompletionMessageToolCallsInput': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> 'FunctionCall': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["role", "content", "refusal", "name", "audio", "tool_calls", "function_call", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> typing.Union[MetaOapg.properties.refusal, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["audio"]) -> typing.Union['Audio', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_calls"]) -> typing.Union['ChatCompletionMessageToolCallsInput', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union['FunctionCall', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["role", "content", "refusal", "name", "audio", "tool_calls", "function_call", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - role: typing.Union[MetaOapg.properties.role, str, ], - content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - refusal: typing.Union[MetaOapg.properties.refusal, None, str, schemas.Unset] = schemas.unset, - name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, - audio: typing.Union['Audio', schemas.Unset] = schemas.unset, - tool_calls: typing.Union['ChatCompletionMessageToolCallsInput', schemas.Unset] = schemas.unset, - function_call: typing.Union['FunctionCall', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestAssistantMessage': - return super().__new__( - cls, - *_args, - role=role, - content=content, - refusal=refusal, - name=name, - audio=audio, - tool_calls=tool_calls, - function_call=function_call, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.audio import Audio -from launch.api_client.model.chat_completion_message_tool_calls_input import ( - ChatCompletionMessageToolCallsInput, -) -from launch.api_client.model.content import Content -from launch.api_client.model.function_call import FunctionCall diff --git a/launch/api_client/model/chat_completion_request_assistant_message_content_part.py b/launch/api_client/model/chat_completion_request_assistant_message_content_part.py deleted file mode 100644 
index 9b39c897..00000000 --- a/launch/api_client/model/chat_completion_request_assistant_message_content_part.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestAssistantMessageContentPart( - schemas.ComposedSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - ChatCompletionRequestMessageContentPartText, - ChatCompletionRequestMessageContentPartRefusal, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestAssistantMessageContentPart': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_request_message_content_part_refusal import ( - ChatCompletionRequestMessageContentPartRefusal, -) -from launch.api_client.model.chat_completion_request_message_content_part_text import ( - ChatCompletionRequestMessageContentPartText, -) diff --git a/launch/api_client/model/chat_completion_request_developer_message.py b/launch/api_client/model/chat_completion_request_developer_message.py deleted file mode 100644 index c672eddb..00000000 --- a/launch/api_client/model/chat_completion_request_developer_message.py +++ /dev/null @@ -1,178 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - 
-import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestDeveloperMessage( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "role", - "content", - } - - class properties: - - - class content( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - Content1, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class role( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "developer": "DEVELOPER", - } - - @schemas.classproperty - def DEVELOPER(cls): - return cls("developer") - - - class name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "content": content, - "role": role, - "name": name, - } - - role: MetaOapg.properties.role - content: MetaOapg.properties.content - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - role: typing.Union[MetaOapg.properties.role, str, ], - content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestDeveloperMessage': - return super().__new__( - cls, - *_args, - role=role, - content=content, - name=name, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.content1 import Content1 diff --git a/launch/api_client/model/chat_completion_request_function_message.py b/launch/api_client/model/chat_completion_request_function_message.py deleted file mode 100644 index de175511..00000000 --- a/launch/api_client/model/chat_completion_request_function_message.py +++ /dev/null @@ -1,139 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class 
ChatCompletionRequestFunctionMessage( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "role", - "name", - } - - class properties: - - - class role( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "function": "FUNCTION", - } - - @schemas.classproperty - def FUNCTION(cls): - return cls("function") - name = schemas.StrSchema - - - class content( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "role": role, - "name": name, - "content": content, - } - - role: MetaOapg.properties.role - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["role", "name", "content", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["role", "name", "content", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - role: typing.Union[MetaOapg.properties.role, str, ], - name: typing.Union[MetaOapg.properties.name, str, ], - content: typing.Union[MetaOapg.properties.content, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestFunctionMessage': - return super().__new__( - cls, - *_args, - role=role, - name=name, - content=content, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/chat_completion_request_message.py b/launch/api_client/model/chat_completion_request_message.py deleted file mode 100644 index cb489ee6..00000000 --- a/launch/api_client/model/chat_completion_request_message.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: 
F401 - - -class ChatCompletionRequestMessage( - schemas.ComposedSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - ChatCompletionRequestDeveloperMessage, - ChatCompletionRequestSystemMessage, - ChatCompletionRequestUserMessage, - ChatCompletionRequestAssistantMessage, - ChatCompletionRequestToolMessage, - ChatCompletionRequestFunctionMessage, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestMessage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_request_assistant_message import ( - ChatCompletionRequestAssistantMessage, -) -from launch.api_client.model.chat_completion_request_developer_message import ( - ChatCompletionRequestDeveloperMessage, -) -from launch.api_client.model.chat_completion_request_function_message import ( - ChatCompletionRequestFunctionMessage, -) -from launch.api_client.model.chat_completion_request_system_message import ( - ChatCompletionRequestSystemMessage, -) 
-from launch.api_client.model.chat_completion_request_tool_message import ( - ChatCompletionRequestToolMessage, -) -from launch.api_client.model.chat_completion_request_user_message import ( - ChatCompletionRequestUserMessage, -) diff --git a/launch/api_client/model/chat_completion_request_message_content_part_audio.py b/launch/api_client/model/chat_completion_request_message_content_part_audio.py deleted file mode 100644 index 30e8f44f..00000000 --- a/launch/api_client/model/chat_completion_request_message_content_part_audio.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestMessageContentPartAudio( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "input_audio", - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "input_audio": "INPUT_AUDIO", - } - - @schemas.classproperty - def INPUT_AUDIO(cls): - return cls("input_audio") - - @staticmethod - def input_audio() -> typing.Type['InputAudio']: - return InputAudio - __annotations__ = { - "type": type, - "input_audio": input_audio, - } - - input_audio: 'InputAudio' - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["input_audio"]) -> 'InputAudio': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "input_audio", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["input_audio"]) -> 'InputAudio': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "input_audio", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - input_audio: 'InputAudio', - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestMessageContentPartAudio': - return super().__new__( - cls, - *_args, - input_audio=input_audio, - type=type, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.input_audio import InputAudio diff --git a/launch/api_client/model/chat_completion_request_message_content_part_file.py b/launch/api_client/model/chat_completion_request_message_content_part_file.py deleted file mode 100644 index 03fecbdd..00000000 --- a/launch/api_client/model/chat_completion_request_message_content_part_file.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestMessageContentPartFile( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "file", - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "file": "FILE", - } - - @schemas.classproperty - def FILE(cls): - return cls("file") - - @staticmethod - def file() -> typing.Type['File']: - return File - __annotations__ = { - "type": type, - "file": file, - } - - file: 'File' - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["file"]) -> 'File': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "file", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["file"]) -> 'File': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "file", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - file: 'File', - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestMessageContentPartFile': - return super().__new__( - cls, - *_args, - file=file, - type=type, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.file import File diff --git a/launch/api_client/model/chat_completion_request_message_content_part_image.py b/launch/api_client/model/chat_completion_request_message_content_part_image.py deleted file mode 100644 index a3a6b355..00000000 --- a/launch/api_client/model/chat_completion_request_message_content_part_image.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestMessageContentPartImage( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "image_url", - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "image_url": "IMAGE_URL", - } - - @schemas.classproperty - def IMAGE_URL(cls): - return cls("image_url") - - @staticmethod - def image_url() -> typing.Type['ImageUrl']: - return ImageUrl - __annotations__ = { - "type": type, - "image_url": image_url, - } - - image_url: 'ImageUrl' - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_url"]) -> 'ImageUrl': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "image_url", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_url"]) -> 'ImageUrl': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "image_url", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - image_url: 'ImageUrl', - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestMessageContentPartImage': - return super().__new__( - cls, - *_args, - image_url=image_url, - type=type, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.image_url import ImageUrl diff --git a/launch/api_client/model/chat_completion_request_message_content_part_refusal.py b/launch/api_client/model/chat_completion_request_message_content_part_refusal.py deleted file mode 100644 index 760f54bf..00000000 --- a/launch/api_client/model/chat_completion_request_message_content_part_refusal.py +++ /dev/null @@ -1,110 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestMessageContentPartRefusal( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "refusal", - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "refusal": "REFUSAL", - } - - @schemas.classproperty - def REFUSAL(cls): - return cls("refusal") - refusal = schemas.StrSchema - __annotations__ = { - "type": type, - "refusal": refusal, - } - - refusal: MetaOapg.properties.refusal - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "refusal", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "refusal", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - refusal: typing.Union[MetaOapg.properties.refusal, str, ], - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestMessageContentPartRefusal': - return super().__new__( - cls, - *_args, - refusal=refusal, - type=type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/chat_completion_request_message_content_part_text.py b/launch/api_client/model/chat_completion_request_message_content_part_text.py deleted file mode 100644 index 5c6d9f64..00000000 --- a/launch/api_client/model/chat_completion_request_message_content_part_text.py +++ /dev/null @@ -1,110 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestMessageContentPartText( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "text", - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "text": "TEXT", - } - - @schemas.classproperty - def TEXT(cls): - return cls("text") - text = schemas.StrSchema - __annotations__ = { - "type": type, - "text": text, - } - - text: MetaOapg.properties.text - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "text", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "text", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - text: typing.Union[MetaOapg.properties.text, str, ], - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestMessageContentPartText': - return super().__new__( - cls, - *_args, - text=text, - type=type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/chat_completion_request_system_message.py b/launch/api_client/model/chat_completion_request_system_message.py deleted file mode 100644 index 2a05618c..00000000 --- a/launch/api_client/model/chat_completion_request_system_message.py +++ /dev/null @@ -1,178 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestSystemMessage( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "role", - "content", - } - - class properties: - - - class content( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - Content2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class role( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "system": "SYSTEM", - } - - @schemas.classproperty - def SYSTEM(cls): - return cls("system") - - - class name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "content": content, - "role": role, - "name": name, - } - - role: MetaOapg.properties.role - content: MetaOapg.properties.content - - @typing.overload - def __getitem__(self, name: 
typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - role: typing.Union[MetaOapg.properties.role, str, ], - content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestSystemMessage': - return super().__new__( - cls, - *_args, - role=role, - content=content, - name=name, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.content2 import Content2 diff --git a/launch/api_client/model/chat_completion_request_system_message_content_part.py b/launch/api_client/model/chat_completion_request_system_message_content_part.py deleted file mode 100644 index 0b95016e..00000000 --- a/launch/api_client/model/chat_completion_request_system_message_content_part.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - 
-ChatCompletionRequestSystemMessageContentPart = schemas.Schema diff --git a/launch/api_client/model/chat_completion_request_tool_message.py b/launch/api_client/model/chat_completion_request_tool_message.py deleted file mode 100644 index 8dbf2896..00000000 --- a/launch/api_client/model/chat_completion_request_tool_message.py +++ /dev/null @@ -1,161 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestToolMessage( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "role", - "tool_call_id", - "content", - } - - class properties: - - - class role( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "tool": "TOOL", - } - - @schemas.classproperty - def TOOL(cls): - return cls("tool") - - - class content( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - Content3, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - tool_call_id = schemas.StrSchema - __annotations__ = { - "role": role, - "content": content, - "tool_call_id": tool_call_id, - } - - role: MetaOapg.properties.role - tool_call_id: MetaOapg.properties.tool_call_id - content: MetaOapg.properties.content - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_call_id"]) -> MetaOapg.properties.tool_call_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["role", "content", "tool_call_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_call_id"]) -> MetaOapg.properties.tool_call_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["role", "content", "tool_call_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - role: typing.Union[MetaOapg.properties.role, str, ], - tool_call_id: typing.Union[MetaOapg.properties.tool_call_id, str, ], - content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestToolMessage': - return super().__new__( - cls, - *_args, - role=role, - tool_call_id=tool_call_id, - content=content, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.content3 import Content3 diff --git a/launch/api_client/model/chat_completion_request_tool_message_content_part.py b/launch/api_client/model/chat_completion_request_tool_message_content_part.py deleted file mode 100644 index 64d1bcd0..00000000 --- a/launch/api_client/model/chat_completion_request_tool_message_content_part.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # 
noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - -ChatCompletionRequestToolMessageContentPart = schemas.Schema diff --git a/launch/api_client/model/chat_completion_request_user_message.py b/launch/api_client/model/chat_completion_request_user_message.py deleted file mode 100644 index 8e3af6f9..00000000 --- a/launch/api_client/model/chat_completion_request_user_message.py +++ /dev/null @@ -1,178 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestUserMessage( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "role", - "content", - } - - class properties: - - - class content( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - Content4, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class role( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "user": "USER", - } - - @schemas.classproperty - def USER(cls): - return cls("user") - - - class name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "content": content, - "role": role, - "name": name, - } - - role: MetaOapg.properties.role - content: MetaOapg.properties.content - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - role: typing.Union[MetaOapg.properties.role, str, ], - content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestUserMessage': - return super().__new__( - cls, - *_args, - role=role, - content=content, - name=name, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.content4 import Content4 diff --git a/launch/api_client/model/chat_completion_request_user_message_content_part.py b/launch/api_client/model/chat_completion_request_user_message_content_part.py deleted file mode 100644 index 94a74888..00000000 --- 
a/launch/api_client/model/chat_completion_request_user_message_content_part.py +++ /dev/null @@ -1,80 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionRequestUserMessageContentPart( - schemas.ComposedSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - ChatCompletionRequestMessageContentPartText, - ChatCompletionRequestMessageContentPartImage, - ChatCompletionRequestMessageContentPartAudio, - ChatCompletionRequestMessageContentPartFile, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionRequestUserMessageContentPart': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_request_message_content_part_audio import ( - ChatCompletionRequestMessageContentPartAudio, -) -from launch.api_client.model.chat_completion_request_message_content_part_file import ( - ChatCompletionRequestMessageContentPartFile, -) -from launch.api_client.model.chat_completion_request_message_content_part_image import ( - ChatCompletionRequestMessageContentPartImage, -) -from launch.api_client.model.chat_completion_request_message_content_part_text import ( - ChatCompletionRequestMessageContentPartText, -) diff --git a/launch/api_client/model/chat_completion_response_message.py b/launch/api_client/model/chat_completion_response_message.py deleted file mode 100644 index d64ed7d3..00000000 --- a/launch/api_client/model/chat_completion_response_message.py +++ /dev/null @@ -1,238 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionResponseMessage( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "role", - } - - class properties: - - - class role( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "assistant": "ASSISTANT", - } - - @schemas.classproperty - def ASSISTANT(cls): - return cls("assistant") - - - class content( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class refusal( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'refusal': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def tool_calls() -> typing.Type['ChatCompletionMessageToolCallsOutput']: - return ChatCompletionMessageToolCallsOutput - - - class annotations( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - @staticmethod - def items() -> 
typing.Type['Annotation']: - return Annotation - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'annotations': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def function_call() -> typing.Type['FunctionCall']: - return FunctionCall - - @staticmethod - def audio() -> typing.Type['Audio1']: - return Audio1 - __annotations__ = { - "role": role, - "content": content, - "refusal": refusal, - "tool_calls": tool_calls, - "annotations": annotations, - "function_call": function_call, - "audio": audio, - } - - role: MetaOapg.properties.role - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_calls"]) -> 'ChatCompletionMessageToolCallsOutput': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["annotations"]) -> MetaOapg.properties.annotations: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> 'FunctionCall': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["audio"]) -> 'Audio1': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["role", "content", "refusal", "tool_calls", "annotations", "function_call", "audio", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> typing.Union[MetaOapg.properties.refusal, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_calls"]) -> typing.Union['ChatCompletionMessageToolCallsOutput', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["annotations"]) -> typing.Union[MetaOapg.properties.annotations, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union['FunctionCall', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["audio"]) -> typing.Union['Audio1', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["role", "content", "refusal", "tool_calls", "annotations", "function_call", "audio", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - role: typing.Union[MetaOapg.properties.role, str, ], - content: typing.Union[MetaOapg.properties.content, None, str, schemas.Unset] = schemas.unset, - refusal: typing.Union[MetaOapg.properties.refusal, None, str, schemas.Unset] = schemas.unset, - tool_calls: typing.Union['ChatCompletionMessageToolCallsOutput', schemas.Unset] = schemas.unset, - annotations: typing.Union[MetaOapg.properties.annotations, list, tuple, None, schemas.Unset] = schemas.unset, - function_call: typing.Union['FunctionCall', schemas.Unset] = schemas.unset, - audio: typing.Union['Audio1', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionResponseMessage': - return super().__new__( - cls, - *_args, - role=role, - content=content, - refusal=refusal, - tool_calls=tool_calls, - annotations=annotations, - function_call=function_call, - audio=audio, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.annotation import Annotation -from launch.api_client.model.audio1 import Audio1 -from launch.api_client.model.chat_completion_message_tool_calls_output import ( - ChatCompletionMessageToolCallsOutput, -) -from launch.api_client.model.function_call import FunctionCall diff --git a/launch/api_client/model/chat_completion_stream_options.py b/launch/api_client/model/chat_completion_stream_options.py deleted file mode 100644 index fd16c062..00000000 --- a/launch/api_client/model/chat_completion_stream_options.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf-8 - -""" - 
launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionStreamOptions( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - class properties: - - - class include_usage( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'include_usage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "include_usage": include_usage, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["include_usage"]) -> MetaOapg.properties.include_usage: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["include_usage", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["include_usage"]) -> typing.Union[MetaOapg.properties.include_usage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["include_usage", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - include_usage: typing.Union[MetaOapg.properties.include_usage, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionStreamOptions': - return super().__new__( - cls, - *_args, - include_usage=include_usage, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/chat_completion_stream_response_delta.py b/launch/api_client/model/chat_completion_stream_response_delta.py deleted file mode 100644 index 397aca58..00000000 --- a/launch/api_client/model/chat_completion_stream_response_delta.py +++ /dev/null @@ -1,240 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionStreamResponseDelta( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class content( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def function_call() -> typing.Type['FunctionCall2']: - return FunctionCall2 - - - class tool_calls( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionMessageToolCallChunk']: - return ChatCompletionMessageToolCallChunk - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tool_calls': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class role( - schemas.EnumBase, - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - class MetaOapg: - enum_value_to_name = { - "developer": "DEVELOPER", - "system": "SYSTEM", - "user": "USER", - "assistant": "ASSISTANT", - "tool": "TOOL", - } - - @schemas.classproperty - def DEVELOPER(cls): - return cls("developer") - - @schemas.classproperty - def SYSTEM(cls): - return cls("system") - - @schemas.classproperty - def USER(cls): - return cls("user") - - @schemas.classproperty - def ASSISTANT(cls): - return cls("assistant") - - @schemas.classproperty - def TOOL(cls): - return cls("tool") - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'role': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class refusal( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, 
str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'refusal': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "content": content, - "function_call": function_call, - "tool_calls": tool_calls, - "role": role, - "refusal": refusal, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> 'FunctionCall2': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_calls"]) -> MetaOapg.properties.tool_calls: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "function_call", "tool_calls", "role", "refusal", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union['FunctionCall2', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_calls"]) -> typing.Union[MetaOapg.properties.tool_calls, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> typing.Union[MetaOapg.properties.role, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> typing.Union[MetaOapg.properties.refusal, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "function_call", "tool_calls", "role", "refusal", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - content: typing.Union[MetaOapg.properties.content, None, str, schemas.Unset] = schemas.unset, - function_call: typing.Union['FunctionCall2', schemas.Unset] = schemas.unset, - tool_calls: typing.Union[MetaOapg.properties.tool_calls, list, tuple, None, schemas.Unset] = schemas.unset, - role: typing.Union[MetaOapg.properties.role, None, str, schemas.Unset] = schemas.unset, - refusal: typing.Union[MetaOapg.properties.refusal, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionStreamResponseDelta': - return super().__new__( - cls, - *_args, - content=content, - function_call=function_call, - tool_calls=tool_calls, - role=role, - refusal=refusal, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_message_tool_call_chunk import ( - ChatCompletionMessageToolCallChunk, -) -from launch.api_client.model.function_call2 import FunctionCall2 diff --git a/launch/api_client/model/chat_completion_token_logprob.py b/launch/api_client/model/chat_completion_token_logprob.py deleted file mode 100644 index 2217a155..00000000 --- a/launch/api_client/model/chat_completion_token_logprob.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description 
provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionTokenLogprob( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "top_logprobs", - "logprob", - "bytes", - "token", - } - - class properties: - token = schemas.StrSchema - logprob = schemas.NumberSchema - - - class bytes( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.IntSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'bytes': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_logprobs( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['TopLogprob']: - return TopLogprob - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['TopLogprob'], typing.List['TopLogprob']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_logprobs': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'TopLogprob': - return super().__getitem__(i) - __annotations__ = { - "token": token, - "logprob": logprob, - "bytes": bytes, - "top_logprobs": top_logprobs, - } - - top_logprobs: MetaOapg.properties.top_logprobs - 
logprob: MetaOapg.properties.logprob - bytes: MetaOapg.properties.bytes - token: MetaOapg.properties.token - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logprob"]) -> MetaOapg.properties.logprob: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["bytes"]) -> MetaOapg.properties.bytes: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["token", "logprob", "bytes", "top_logprobs", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logprob"]) -> MetaOapg.properties.logprob: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["bytes"]) -> MetaOapg.properties.bytes: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["token", "logprob", "bytes", "top_logprobs", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - top_logprobs: typing.Union[MetaOapg.properties.top_logprobs, list, tuple, ], - logprob: typing.Union[MetaOapg.properties.logprob, decimal.Decimal, int, float, ], - bytes: typing.Union[MetaOapg.properties.bytes, list, tuple, None, ], - token: typing.Union[MetaOapg.properties.token, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionTokenLogprob': - return super().__new__( - cls, - *_args, - top_logprobs=top_logprobs, - logprob=logprob, - bytes=bytes, - token=token, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.top_logprob import TopLogprob diff --git a/launch/api_client/model/chat_completion_tool.py b/launch/api_client/model/chat_completion_tool.py deleted file mode 100644 index f87d91d0..00000000 --- a/launch/api_client/model/chat_completion_tool.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionTool( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. 
- Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "function", - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "function": "FUNCTION", - } - - @schemas.classproperty - def FUNCTION(cls): - return cls("function") - - @staticmethod - def function() -> typing.Type['FunctionObject']: - return FunctionObject - __annotations__ = { - "type": type, - "function": function, - } - - function: 'FunctionObject' - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["function"]) -> 'FunctionObject': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "function", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["function"]) -> 'FunctionObject': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "function", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - function: 'FunctionObject', - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionTool': - return super().__new__( - cls, - *_args, - function=function, - type=type, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.function_object import FunctionObject diff --git a/launch/api_client/model/chat_completion_tool_choice_option.py b/launch/api_client/model/chat_completion_tool_choice_option.py deleted file mode 100644 index d39b735c..00000000 --- a/launch/api_client/model/chat_completion_tool_choice_option.py +++ /dev/null @@ -1,104 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionToolChoiceOption( - schemas.ComposedSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Controls which (if any) tool is called by the model. -`none` means the model will not call any tool and instead generates a message. 
-`auto` means the model can pick between generating a message or calling one or more tools. -`required` means the model must call one or more tools. -Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - -`none` is the default when no tools are present. `auto` is the default if tools are present. - - """ - - - class MetaOapg: - - - class any_of_0( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "none": "NONE", - "auto": "AUTO", - "required": "REQUIRED", - } - - @schemas.classproperty - def NONE(cls): - return cls("none") - - @schemas.classproperty - def AUTO(cls): - return cls("auto") - - @schemas.classproperty - def REQUIRED(cls): - return cls("required") - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - ChatCompletionNamedToolChoice, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionToolChoiceOption': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_named_tool_choice import ( - ChatCompletionNamedToolChoice, -) diff --git a/launch/api_client/model/chat_completion_v2_request.py b/launch/api_client/model/chat_completion_v2_request.py deleted file mode 100644 index 10542cc7..00000000 --- a/launch/api_client/model/chat_completion_v2_request.py +++ /dev/null @@ -1,1788 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionV2Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. 
- Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "messages", - "model", - } - - class properties: - - - class messages( - schemas.ListSchema - ): - - - class MetaOapg: - min_items = 1 - - @staticmethod - def items() -> typing.Type['ChatCompletionRequestMessage']: - return ChatCompletionRequestMessage - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessage'], typing.List['ChatCompletionRequestMessage']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'messages': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ChatCompletionRequestMessage': - return super().__getitem__(i) - model = schemas.StrSchema - - - class best_of( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'best_of': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_k( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = -1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_k': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_p': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class use_beam_search( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'use_beam_search': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class length_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'length_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class repetition_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'repetition_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class early_stopping( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'early_stopping': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class stop_token_ids( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.IntSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stop_token_ids': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class include_stop_str_in_output( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'include_stop_str_in_output': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ignore_eos( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ignore_eos': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class skip_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class spaces_between_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'spaces_between_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class echo( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'echo': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class add_generation_prompt( - schemas.BoolBase, - 
schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'add_generation_prompt': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class continue_final_message( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'continue_final_message': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class add_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'add_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class documents( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - - class items( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'items': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] 
= None, - ) -> 'documents': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template_kwargs( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'chat_template_kwargs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_json( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'guided_json': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_regex( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_regex': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_choice( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_choice': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_grammar( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_grammar': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_decoding_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_decoding_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_whitespace_pattern( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: 
typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_whitespace_pattern': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class priority( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def metadata() -> typing.Type['Metadata']: - return Metadata - - - class temperature( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'temperature': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_p': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class user( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'user': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def service_tier() -> typing.Type['ServiceTier']: - return ServiceTier - - @staticmethod - def modalities() -> 
typing.Type['ResponseModalities']: - return ResponseModalities - - @staticmethod - def reasoning_effort() -> typing.Type['ReasoningEffort']: - return ReasoningEffort - - - class max_completion_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_completion_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class frequency_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = -2.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'frequency_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class presence_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = -2.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'presence_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def web_search_options() -> typing.Type['WebSearchOptions']: - return WebSearchOptions - - - class top_logprobs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 20 - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_logprobs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - 
) - - - class response_format( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - ResponseFormatText, - ResponseFormatJsonSchema, - ResponseFormatJsonObject, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'response_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def audio() -> typing.Type['Audio2']: - return Audio2 - - - class store( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'store': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class stream( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stream': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def stop() -> typing.Type['StopConfiguration']: - return 
StopConfiguration - - - class logit_bias( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.IntSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, decimal.Decimal, int, ], - ) -> 'logit_bias': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class logprobs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'logprobs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class n( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 128 - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'n': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def prediction() -> 
typing.Type['PredictionContent']: - return PredictionContent - - - class seed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = -9223372036854775616 - inclusive_minimum = 9223372036854775616 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'seed': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def stream_options() -> typing.Type['ChatCompletionStreamOptions']: - return ChatCompletionStreamOptions - - - class tools( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionTool']: - return ChatCompletionTool - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tools': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def tool_choice() -> typing.Type['ChatCompletionToolChoiceOption']: - return ChatCompletionToolChoiceOption - parallel_tool_calls = schemas.BoolSchema - - - class function_call( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - - class any_of_0( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "none": "NONE", - "auto": "AUTO", - } - - @schemas.classproperty - def NONE(cls): - return cls("none") - - @schemas.classproperty - def AUTO(cls): - return cls("auto") - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - ChatCompletionFunctionCallOption, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'function_call': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class functions( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionFunctions']: - return ChatCompletionFunctions - max_items = 128 - min_items = 1 - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'functions': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "messages": messages, - "model": model, - "best_of": best_of, - "top_k": top_k, - "min_p": min_p, - "use_beam_search": use_beam_search, - "length_penalty": length_penalty, - "repetition_penalty": repetition_penalty, - "early_stopping": early_stopping, - "stop_token_ids": stop_token_ids, - "include_stop_str_in_output": include_stop_str_in_output, - "ignore_eos": ignore_eos, - "min_tokens": min_tokens, - "skip_special_tokens": skip_special_tokens, - "spaces_between_special_tokens": spaces_between_special_tokens, - "echo": echo, - "add_generation_prompt": add_generation_prompt, - "continue_final_message": 
continue_final_message, - "add_special_tokens": add_special_tokens, - "documents": documents, - "chat_template": chat_template, - "chat_template_kwargs": chat_template_kwargs, - "guided_json": guided_json, - "guided_regex": guided_regex, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "guided_decoding_backend": guided_decoding_backend, - "guided_whitespace_pattern": guided_whitespace_pattern, - "priority": priority, - "metadata": metadata, - "temperature": temperature, - "top_p": top_p, - "user": user, - "service_tier": service_tier, - "modalities": modalities, - "reasoning_effort": reasoning_effort, - "max_completion_tokens": max_completion_tokens, - "frequency_penalty": frequency_penalty, - "presence_penalty": presence_penalty, - "web_search_options": web_search_options, - "top_logprobs": top_logprobs, - "response_format": response_format, - "audio": audio, - "store": store, - "stream": stream, - "stop": stop, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_tokens": max_tokens, - "n": n, - "prediction": prediction, - "seed": seed, - "stream_options": stream_options, - "tools": tools, - "tool_choice": tool_choice, - "parallel_tool_calls": parallel_tool_calls, - "function_call": function_call, - "functions": functions, - } - - messages: MetaOapg.properties.messages - model: MetaOapg.properties.model - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["messages"]) -> MetaOapg.properties.messages: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["best_of"]) -> MetaOapg.properties.best_of: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_p"]) -> MetaOapg.properties.min_p: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["use_beam_search"]) -> MetaOapg.properties.use_beam_search: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["length_penalty"]) -> MetaOapg.properties.length_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repetition_penalty"]) -> MetaOapg.properties.repetition_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["early_stopping"]) -> MetaOapg.properties.early_stopping: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_token_ids"]) -> MetaOapg.properties.stop_token_ids: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ignore_eos"]) -> MetaOapg.properties.ignore_eos: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_tokens"]) -> MetaOapg.properties.min_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> MetaOapg.properties.spaces_between_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["echo"]) -> MetaOapg.properties.echo: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["add_generation_prompt"]) -> MetaOapg.properties.add_generation_prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["continue_final_message"]) -> MetaOapg.properties.continue_final_message: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["add_special_tokens"]) -> MetaOapg.properties.add_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["documents"]) -> MetaOapg.properties.documents: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_kwargs"]) -> MetaOapg.properties.chat_template_kwargs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> MetaOapg.properties.guided_decoding_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> MetaOapg.properties.guided_whitespace_pattern: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> 'Metadata': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["user"]) -> MetaOapg.properties.user: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["service_tier"]) -> 'ServiceTier': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["modalities"]) -> 'ResponseModalities': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["reasoning_effort"]) -> 'ReasoningEffort': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_completion_tokens"]) -> MetaOapg.properties.max_completion_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["web_search_options"]) -> 'WebSearchOptions': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["response_format"]) -> MetaOapg.properties.response_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["audio"]) -> 'Audio2': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["store"]) -> MetaOapg.properties.store: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream"]) -> MetaOapg.properties.stream: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop"]) -> 'StopConfiguration': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logit_bias"]) -> MetaOapg.properties.logit_bias: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> MetaOapg.properties.logprobs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_tokens"]) -> MetaOapg.properties.max_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["n"]) -> MetaOapg.properties.n: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prediction"]) -> 'PredictionContent': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream_options"]) -> 'ChatCompletionStreamOptions': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tools"]) -> MetaOapg.properties.tools: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_choice"]) -> 'ChatCompletionToolChoiceOption': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["parallel_tool_calls"]) -> MetaOapg.properties.parallel_tool_calls: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> MetaOapg.properties.function_call: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["functions"]) -> MetaOapg.properties.functions: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["messages", "model", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "echo", "add_generation_prompt", "continue_final_message", "add_special_tokens", "documents", "chat_template", "chat_template_kwargs", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "priority", "metadata", "temperature", "top_p", "user", "service_tier", "modalities", "reasoning_effort", "max_completion_tokens", "frequency_penalty", "presence_penalty", "web_search_options", "top_logprobs", "response_format", "audio", "store", "stream", "stop", "logit_bias", "logprobs", "max_tokens", "n", "prediction", "seed", "stream_options", "tools", "tool_choice", "parallel_tool_calls", "function_call", "functions", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["messages"]) -> MetaOapg.properties.messages: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["best_of"]) -> typing.Union[MetaOapg.properties.best_of, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_p"]) -> typing.Union[MetaOapg.properties.min_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["use_beam_search"]) -> typing.Union[MetaOapg.properties.use_beam_search, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["length_penalty"]) -> typing.Union[MetaOapg.properties.length_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repetition_penalty"]) -> typing.Union[MetaOapg.properties.repetition_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["early_stopping"]) -> typing.Union[MetaOapg.properties.early_stopping, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop_token_ids"]) -> typing.Union[MetaOapg.properties.stop_token_ids, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ignore_eos"]) -> typing.Union[MetaOapg.properties.ignore_eos, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_tokens"]) -> typing.Union[MetaOapg.properties.min_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> typing.Union[MetaOapg.properties.spaces_between_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["echo"]) -> typing.Union[MetaOapg.properties.echo, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["add_generation_prompt"]) -> typing.Union[MetaOapg.properties.add_generation_prompt, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["continue_final_message"]) -> typing.Union[MetaOapg.properties.continue_final_message, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["add_special_tokens"]) -> typing.Union[MetaOapg.properties.add_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["documents"]) -> typing.Union[MetaOapg.properties.documents, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_kwargs"]) -> typing.Union[MetaOapg.properties.chat_template_kwargs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> typing.Union[MetaOapg.properties.guided_decoding_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.guided_whitespace_pattern, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union['Metadata', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> typing.Union[MetaOapg.properties.temperature, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["user"]) -> typing.Union[MetaOapg.properties.user, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["service_tier"]) -> typing.Union['ServiceTier', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["modalities"]) -> typing.Union['ResponseModalities', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["reasoning_effort"]) -> typing.Union['ReasoningEffort', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_completion_tokens"]) -> typing.Union[MetaOapg.properties.max_completion_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["web_search_options"]) -> typing.Union['WebSearchOptions', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_logprobs"]) -> typing.Union[MetaOapg.properties.top_logprobs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["response_format"]) -> typing.Union[MetaOapg.properties.response_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["audio"]) -> typing.Union['Audio2', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["store"]) -> typing.Union[MetaOapg.properties.store, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream"]) -> typing.Union[MetaOapg.properties.stream, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop"]) -> typing.Union['StopConfiguration', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logit_bias"]) -> typing.Union[MetaOapg.properties.logit_bias, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union[MetaOapg.properties.logprobs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_tokens"]) -> typing.Union[MetaOapg.properties.max_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["n"]) -> typing.Union[MetaOapg.properties.n, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prediction"]) -> typing.Union['PredictionContent', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream_options"]) -> typing.Union['ChatCompletionStreamOptions', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tools"]) -> typing.Union[MetaOapg.properties.tools, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_choice"]) -> typing.Union['ChatCompletionToolChoiceOption', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["parallel_tool_calls"]) -> typing.Union[MetaOapg.properties.parallel_tool_calls, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union[MetaOapg.properties.function_call, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["functions"]) -> typing.Union[MetaOapg.properties.functions, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["messages", "model", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "echo", "add_generation_prompt", "continue_final_message", "add_special_tokens", "documents", "chat_template", "chat_template_kwargs", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "priority", "metadata", "temperature", "top_p", "user", "service_tier", "modalities", "reasoning_effort", "max_completion_tokens", "frequency_penalty", "presence_penalty", "web_search_options", "top_logprobs", "response_format", "audio", "store", "stream", "stop", "logit_bias", "logprobs", "max_tokens", "n", "prediction", "seed", "stream_options", "tools", "tool_choice", "parallel_tool_calls", "function_call", "functions", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - messages: typing.Union[MetaOapg.properties.messages, list, tuple, ], - model: typing.Union[MetaOapg.properties.model, str, ], - best_of: typing.Union[MetaOapg.properties.best_of, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - min_p: typing.Union[MetaOapg.properties.min_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - use_beam_search: typing.Union[MetaOapg.properties.use_beam_search, None, bool, schemas.Unset] = schemas.unset, - length_penalty: typing.Union[MetaOapg.properties.length_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - repetition_penalty: typing.Union[MetaOapg.properties.repetition_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, 
- early_stopping: typing.Union[MetaOapg.properties.early_stopping, None, bool, schemas.Unset] = schemas.unset, - stop_token_ids: typing.Union[MetaOapg.properties.stop_token_ids, list, tuple, None, schemas.Unset] = schemas.unset, - include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, - ignore_eos: typing.Union[MetaOapg.properties.ignore_eos, None, bool, schemas.Unset] = schemas.unset, - min_tokens: typing.Union[MetaOapg.properties.min_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, - spaces_between_special_tokens: typing.Union[MetaOapg.properties.spaces_between_special_tokens, None, bool, schemas.Unset] = schemas.unset, - echo: typing.Union[MetaOapg.properties.echo, None, bool, schemas.Unset] = schemas.unset, - add_generation_prompt: typing.Union[MetaOapg.properties.add_generation_prompt, None, bool, schemas.Unset] = schemas.unset, - continue_final_message: typing.Union[MetaOapg.properties.continue_final_message, None, bool, schemas.Unset] = schemas.unset, - add_special_tokens: typing.Union[MetaOapg.properties.add_special_tokens, None, bool, schemas.Unset] = schemas.unset, - documents: typing.Union[MetaOapg.properties.documents, list, tuple, None, schemas.Unset] = schemas.unset, - chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, - chat_template_kwargs: typing.Union[MetaOapg.properties.chat_template_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, - guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, 
schemas.Unset] = schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, - guided_decoding_backend: typing.Union[MetaOapg.properties.guided_decoding_backend, None, str, schemas.Unset] = schemas.unset, - guided_whitespace_pattern: typing.Union[MetaOapg.properties.guided_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, - priority: typing.Union[MetaOapg.properties.priority, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - metadata: typing.Union['Metadata', schemas.Unset] = schemas.unset, - temperature: typing.Union[MetaOapg.properties.temperature, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - user: typing.Union[MetaOapg.properties.user, None, str, schemas.Unset] = schemas.unset, - service_tier: typing.Union['ServiceTier', schemas.Unset] = schemas.unset, - modalities: typing.Union['ResponseModalities', schemas.Unset] = schemas.unset, - reasoning_effort: typing.Union['ReasoningEffort', schemas.Unset] = schemas.unset, - max_completion_tokens: typing.Union[MetaOapg.properties.max_completion_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - web_search_options: typing.Union['WebSearchOptions', schemas.Unset] = schemas.unset, - top_logprobs: typing.Union[MetaOapg.properties.top_logprobs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - response_format: typing.Union[MetaOapg.properties.response_format, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, schemas.Unset] = schemas.unset, - audio: typing.Union['Audio2', schemas.Unset] = schemas.unset, - store: typing.Union[MetaOapg.properties.store, None, bool, schemas.Unset] = schemas.unset, - stream: typing.Union[MetaOapg.properties.stream, None, bool, schemas.Unset] = schemas.unset, - stop: typing.Union['StopConfiguration', schemas.Unset] = schemas.unset, - logit_bias: typing.Union[MetaOapg.properties.logit_bias, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - logprobs: typing.Union[MetaOapg.properties.logprobs, None, bool, schemas.Unset] = schemas.unset, - max_tokens: typing.Union[MetaOapg.properties.max_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - n: typing.Union[MetaOapg.properties.n, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - prediction: typing.Union['PredictionContent', schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - stream_options: typing.Union['ChatCompletionStreamOptions', schemas.Unset] = schemas.unset, - tools: typing.Union[MetaOapg.properties.tools, list, tuple, None, schemas.Unset] = schemas.unset, - tool_choice: typing.Union['ChatCompletionToolChoiceOption', schemas.Unset] = schemas.unset, - parallel_tool_calls: typing.Union[MetaOapg.properties.parallel_tool_calls, bool, schemas.Unset] = schemas.unset, - function_call: typing.Union[MetaOapg.properties.function_call, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - functions: typing.Union[MetaOapg.properties.functions, list, tuple, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], 
- ) -> 'ChatCompletionV2Request': - return super().__new__( - cls, - *_args, - messages=messages, - model=model, - best_of=best_of, - top_k=top_k, - min_p=min_p, - use_beam_search=use_beam_search, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - early_stopping=early_stopping, - stop_token_ids=stop_token_ids, - include_stop_str_in_output=include_stop_str_in_output, - ignore_eos=ignore_eos, - min_tokens=min_tokens, - skip_special_tokens=skip_special_tokens, - spaces_between_special_tokens=spaces_between_special_tokens, - echo=echo, - add_generation_prompt=add_generation_prompt, - continue_final_message=continue_final_message, - add_special_tokens=add_special_tokens, - documents=documents, - chat_template=chat_template, - chat_template_kwargs=chat_template_kwargs, - guided_json=guided_json, - guided_regex=guided_regex, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - guided_decoding_backend=guided_decoding_backend, - guided_whitespace_pattern=guided_whitespace_pattern, - priority=priority, - metadata=metadata, - temperature=temperature, - top_p=top_p, - user=user, - service_tier=service_tier, - modalities=modalities, - reasoning_effort=reasoning_effort, - max_completion_tokens=max_completion_tokens, - frequency_penalty=frequency_penalty, - presence_penalty=presence_penalty, - web_search_options=web_search_options, - top_logprobs=top_logprobs, - response_format=response_format, - audio=audio, - store=store, - stream=stream, - stop=stop, - logit_bias=logit_bias, - logprobs=logprobs, - max_tokens=max_tokens, - n=n, - prediction=prediction, - seed=seed, - stream_options=stream_options, - tools=tools, - tool_choice=tool_choice, - parallel_tool_calls=parallel_tool_calls, - function_call=function_call, - functions=functions, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.audio2 import Audio2 -from launch.api_client.model.chat_completion_function_call_option import ( - 
ChatCompletionFunctionCallOption, -) -from launch.api_client.model.chat_completion_functions import ( - ChatCompletionFunctions, -) -from launch.api_client.model.chat_completion_request_message import ( - ChatCompletionRequestMessage, -) -from launch.api_client.model.chat_completion_stream_options import ( - ChatCompletionStreamOptions, -) -from launch.api_client.model.chat_completion_tool import ChatCompletionTool -from launch.api_client.model.chat_completion_tool_choice_option import ( - ChatCompletionToolChoiceOption, -) -from launch.api_client.model.metadata import Metadata -from launch.api_client.model.prediction_content import PredictionContent -from launch.api_client.model.reasoning_effort import ReasoningEffort -from launch.api_client.model.response_format_json_object import ( - ResponseFormatJsonObject, -) -from launch.api_client.model.response_format_json_schema import ( - ResponseFormatJsonSchema, -) -from launch.api_client.model.response_format_text import ResponseFormatText -from launch.api_client.model.response_modalities import ResponseModalities -from launch.api_client.model.service_tier import ServiceTier -from launch.api_client.model.stop_configuration import StopConfiguration -from launch.api_client.model.web_search_options import WebSearchOptions diff --git a/launch/api_client/model/chat_completion_v2_stream_error_chunk.py b/launch/api_client/model/chat_completion_v2_stream_error_chunk.py deleted file mode 100644 index 7e093fe7..00000000 --- a/launch/api_client/model/chat_completion_v2_stream_error_chunk.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: 
F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ChatCompletionV2StreamErrorChunk( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "error", - } - - class properties: - - @staticmethod - def error() -> typing.Type['StreamError']: - return StreamError - __annotations__ = { - "error": error, - } - - error: 'StreamError' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["error", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["error", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - error: 'StreamError', - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ChatCompletionV2StreamErrorChunk': - return super().__new__( - cls, - *_args, - error=error, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.stream_error import StreamError diff --git a/launch/api_client/model/choice.py b/launch/api_client/model/choice.py deleted file mode 100644 index b74ce765..00000000 --- a/launch/api_client/model/choice.py +++ /dev/null @@ -1,165 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Choice( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "finish_reason", - "index", - "message", - "logprobs", - } - - class properties: - - - class finish_reason( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "stop": "STOP", - "length": "LENGTH", - "tool_calls": "TOOL_CALLS", - "content_filter": "CONTENT_FILTER", - "function_call": "FUNCTION_CALL", - } - - @schemas.classproperty - def STOP(cls): - return cls("stop") - - @schemas.classproperty - def LENGTH(cls): - return cls("length") - - @schemas.classproperty - def TOOL_CALLS(cls): - return cls("tool_calls") - - @schemas.classproperty - def CONTENT_FILTER(cls): - return cls("content_filter") - - @schemas.classproperty - def FUNCTION_CALL(cls): - return cls("function_call") - index = schemas.IntSchema - - @staticmethod - def message() -> typing.Type['ChatCompletionResponseMessage']: - return ChatCompletionResponseMessage - - @staticmethod - def logprobs() -> typing.Type['Logprobs']: - return Logprobs - __annotations__ = { - "finish_reason": finish_reason, - "index": index, - "message": message, - "logprobs": logprobs, - } - - finish_reason: MetaOapg.properties.finish_reason - index: MetaOapg.properties.index - message: 'ChatCompletionResponseMessage' - logprobs: 'Logprobs' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["message"]) -> 'ChatCompletionResponseMessage': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["finish_reason", "index", "message", "logprobs", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["message"]) -> 'ChatCompletionResponseMessage': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["finish_reason", "index", "message", "logprobs", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - finish_reason: typing.Union[MetaOapg.properties.finish_reason, str, ], - index: typing.Union[MetaOapg.properties.index, decimal.Decimal, int, ], - message: 'ChatCompletionResponseMessage', - logprobs: 'Logprobs', - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Choice': - return super().__new__( - cls, - *_args, - finish_reason=finish_reason, - index=index, - message=message, - logprobs=logprobs, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_response_message import ( - ChatCompletionResponseMessage, -) -from launch.api_client.model.logprobs import Logprobs diff --git a/launch/api_client/model/choice1.py b/launch/api_client/model/choice1.py deleted file mode 100644 index 
ee27ba62..00000000 --- a/launch/api_client/model/choice1.py +++ /dev/null @@ -1,178 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Choice1( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "finish_reason", - "delta", - "index", - } - - class properties: - - @staticmethod - def delta() -> typing.Type['ChatCompletionStreamResponseDelta']: - return ChatCompletionStreamResponseDelta - - - class finish_reason( - schemas.EnumBase, - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - class MetaOapg: - enum_value_to_name = { - "stop": "STOP", - "length": "LENGTH", - "tool_calls": "TOOL_CALLS", - "content_filter": "CONTENT_FILTER", - "function_call": "FUNCTION_CALL", - } - - @schemas.classproperty - def STOP(cls): - return cls("stop") - - @schemas.classproperty - def LENGTH(cls): - return cls("length") - - @schemas.classproperty - def TOOL_CALLS(cls): - return cls("tool_calls") - - @schemas.classproperty - def CONTENT_FILTER(cls): - return cls("content_filter") - - @schemas.classproperty - def FUNCTION_CALL(cls): - return cls("function_call") - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'finish_reason': - return super().__new__( - cls, 
- *_args, - _configuration=_configuration, - ) - index = schemas.IntSchema - - @staticmethod - def logprobs() -> typing.Type['Logprobs']: - return Logprobs - __annotations__ = { - "delta": delta, - "finish_reason": finish_reason, - "index": index, - "logprobs": logprobs, - } - - finish_reason: MetaOapg.properties.finish_reason - delta: 'ChatCompletionStreamResponseDelta' - index: MetaOapg.properties.index - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["delta"]) -> 'ChatCompletionStreamResponseDelta': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["delta", "finish_reason", "index", "logprobs", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["delta"]) -> 'ChatCompletionStreamResponseDelta': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union['Logprobs', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["delta", "finish_reason", "index", "logprobs", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - finish_reason: typing.Union[MetaOapg.properties.finish_reason, None, str, ], - delta: 'ChatCompletionStreamResponseDelta', - index: typing.Union[MetaOapg.properties.index, decimal.Decimal, int, ], - logprobs: typing.Union['Logprobs', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Choice1': - return super().__new__( - cls, - *_args, - finish_reason=finish_reason, - delta=delta, - index=index, - logprobs=logprobs, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_stream_response_delta import ( - ChatCompletionStreamResponseDelta, -) -from launch.api_client.model.logprobs import Logprobs diff --git a/launch/api_client/model/choice2.py b/launch/api_client/model/choice2.py deleted file mode 100644 index c5843aa9..00000000 --- a/launch/api_client/model/choice2.py +++ /dev/null @@ -1,149 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Choice2( - schemas.DictSchema -): - """NOTE: This class is auto 
generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "finish_reason", - "index", - "text", - "logprobs", - } - - class properties: - - - class finish_reason( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "stop": "STOP", - "length": "LENGTH", - "content_filter": "CONTENT_FILTER", - } - - @schemas.classproperty - def STOP(cls): - return cls("stop") - - @schemas.classproperty - def LENGTH(cls): - return cls("length") - - @schemas.classproperty - def CONTENT_FILTER(cls): - return cls("content_filter") - index = schemas.IntSchema - - @staticmethod - def logprobs() -> typing.Type['Logprobs2']: - return Logprobs2 - text = schemas.StrSchema - __annotations__ = { - "finish_reason": finish_reason, - "index": index, - "logprobs": logprobs, - "text": text, - } - - finish_reason: MetaOapg.properties.finish_reason - index: MetaOapg.properties.index - text: MetaOapg.properties.text - logprobs: 'Logprobs2' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs2': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["finish_reason", "index", "logprobs", "text", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs2': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["finish_reason", "index", "logprobs", "text", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - finish_reason: typing.Union[MetaOapg.properties.finish_reason, str, ], - index: typing.Union[MetaOapg.properties.index, decimal.Decimal, int, ], - text: typing.Union[MetaOapg.properties.text, str, ], - logprobs: 'Logprobs2', - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Choice2': - return super().__new__( - cls, - *_args, - finish_reason=finish_reason, - index=index, - text=text, - logprobs=logprobs, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.logprobs2 import Logprobs2 diff --git a/launch/api_client/model/clone_model_bundle_v1_request.py b/launch/api_client/model/clone_model_bundle_v1_request.py deleted file mode 100644 index 299cdd13..00000000 --- a/launch/api_client/model/clone_model_bundle_v1_request.py +++ /dev/null @@ -1,127 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import 
functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CloneModelBundleV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for cloning a Model Bundle from another one. - """ - - - class MetaOapg: - required = { - "original_model_bundle_id", - } - - class properties: - original_model_bundle_id = schemas.StrSchema - - - class new_app_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'new_app_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "original_model_bundle_id": original_model_bundle_id, - "new_app_config": new_app_config, - } - - original_model_bundle_id: MetaOapg.properties.original_model_bundle_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["original_model_bundle_id"]) -> 
MetaOapg.properties.original_model_bundle_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["new_app_config"]) -> MetaOapg.properties.new_app_config: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["original_model_bundle_id", "new_app_config", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["original_model_bundle_id"]) -> MetaOapg.properties.original_model_bundle_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["new_app_config"]) -> typing.Union[MetaOapg.properties.new_app_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["original_model_bundle_id", "new_app_config", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - original_model_bundle_id: typing.Union[MetaOapg.properties.original_model_bundle_id, str, ], - new_app_config: typing.Union[MetaOapg.properties.new_app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CloneModelBundleV1Request': - return super().__new__( - cls, - *_args, - original_model_bundle_id=original_model_bundle_id, - new_app_config=new_app_config, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/clone_model_bundle_v2_request.py b/launch/api_client/model/clone_model_bundle_v2_request.py deleted file mode 
100644 index 67602165..00000000 --- a/launch/api_client/model/clone_model_bundle_v2_request.py +++ /dev/null @@ -1,127 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CloneModelBundleV2Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for cloning a Model Bundle from another one. - """ - - - class MetaOapg: - required = { - "original_model_bundle_id", - } - - class properties: - original_model_bundle_id = schemas.StrSchema - - - class new_app_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'new_app_config': - 
return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "original_model_bundle_id": original_model_bundle_id, - "new_app_config": new_app_config, - } - - original_model_bundle_id: MetaOapg.properties.original_model_bundle_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["original_model_bundle_id"]) -> MetaOapg.properties.original_model_bundle_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["new_app_config"]) -> MetaOapg.properties.new_app_config: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["original_model_bundle_id", "new_app_config", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["original_model_bundle_id"]) -> MetaOapg.properties.original_model_bundle_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["new_app_config"]) -> typing.Union[MetaOapg.properties.new_app_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["original_model_bundle_id", "new_app_config", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - original_model_bundle_id: typing.Union[MetaOapg.properties.original_model_bundle_id, str, ], - new_app_config: typing.Union[MetaOapg.properties.new_app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CloneModelBundleV2Request': - return super().__new__( - cls, - *_args, - original_model_bundle_id=original_model_bundle_id, - new_app_config=new_app_config, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/cloudpickle_artifact_flavor.py b/launch/api_client/model/cloudpickle_artifact_flavor.py deleted file mode 100644 index 669356c3..00000000 --- a/launch/api_client/model/cloudpickle_artifact_flavor.py +++ /dev/null @@ -1,265 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CloudpickleArtifactFlavor( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- - This is the entity-layer class for the Model Bundle flavor of a cloudpickle artifact. - """ - - - class MetaOapg: - required = { - "flavor", - "requirements", - "framework", - "load_model_fn", - "location", - "load_predict_fn", - } - - class properties: - - - class requirements( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'requirements': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class framework( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - PytorchFramework, - TensorflowFramework, - CustomFramework, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'framework': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - location = schemas.StrSchema - - - class flavor( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "cloudpickle_artifact": "CLOUDPICKLE_ARTIFACT", - } - - @schemas.classproperty - def CLOUDPICKLE_ARTIFACT(cls): - return cls("cloudpickle_artifact") - load_predict_fn = schemas.StrSchema - load_model_fn = schemas.StrSchema - - - class app_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, 
None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'app_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "requirements": requirements, - "framework": framework, - "location": location, - "flavor": flavor, - "load_predict_fn": load_predict_fn, - "load_model_fn": load_model_fn, - "app_config": app_config, - } - - flavor: MetaOapg.properties.flavor - requirements: MetaOapg.properties.requirements - framework: MetaOapg.properties.framework - load_model_fn: MetaOapg.properties.load_model_fn - location: MetaOapg.properties.location - load_predict_fn: MetaOapg.properties.load_predict_fn - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_predict_fn"]) -> MetaOapg.properties.load_predict_fn: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_model_fn"]) -> MetaOapg.properties.load_model_fn: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["requirements", "framework", "location", "flavor", "load_predict_fn", "load_model_fn", "app_config", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_predict_fn"]) -> MetaOapg.properties.load_predict_fn: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_model_fn"]) -> MetaOapg.properties.load_model_fn: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["app_config"]) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requirements", "framework", "location", "flavor", "load_predict_fn", "load_model_fn", "app_config", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - flavor: typing.Union[MetaOapg.properties.flavor, str, ], - requirements: typing.Union[MetaOapg.properties.requirements, list, tuple, ], - framework: typing.Union[MetaOapg.properties.framework, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - load_model_fn: typing.Union[MetaOapg.properties.load_model_fn, str, ], - location: typing.Union[MetaOapg.properties.location, str, ], - load_predict_fn: typing.Union[MetaOapg.properties.load_predict_fn, str, ], - app_config: typing.Union[MetaOapg.properties.app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CloudpickleArtifactFlavor': - return super().__new__( - cls, - *_args, - flavor=flavor, - requirements=requirements, - framework=framework, - load_model_fn=load_model_fn, - location=location, - load_predict_fn=load_predict_fn, - app_config=app_config, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.custom_framework import CustomFramework -from launch.api_client.model.pytorch_framework import PytorchFramework -from launch.api_client.model.tensorflow_framework import TensorflowFramework diff --git a/launch/api_client/model/completion_output.py b/launch/api_client/model/completion_output.py deleted file mode 100644 index ff1934de..00000000 --- a/launch/api_client/model/completion_output.py +++ /dev/null @@ -1,164 +0,0 @@ -# 
coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CompletionOutput( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Represents the output of a completion request to a model. - """ - - - class MetaOapg: - required = { - "num_completion_tokens", - "text", - } - - class properties: - text = schemas.StrSchema - num_completion_tokens = schemas.IntSchema - - - class num_prompt_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_prompt_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokens( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['TokenOutput']: - return TokenOutput - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "text": text, - "num_completion_tokens": num_completion_tokens, - "num_prompt_tokens": num_prompt_tokens, - "tokens": tokens, - } - 
- num_completion_tokens: MetaOapg.properties.num_completion_tokens - text: MetaOapg.properties.text - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_completion_tokens"]) -> MetaOapg.properties.num_completion_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_prompt_tokens"]) -> MetaOapg.properties.num_prompt_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokens"]) -> MetaOapg.properties.tokens: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["text", "num_completion_tokens", "num_prompt_tokens", "tokens", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_completion_tokens"]) -> MetaOapg.properties.num_completion_tokens: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_prompt_tokens"]) -> typing.Union[MetaOapg.properties.num_prompt_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokens"]) -> typing.Union[MetaOapg.properties.tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["text", "num_completion_tokens", "num_prompt_tokens", "tokens", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - num_completion_tokens: typing.Union[MetaOapg.properties.num_completion_tokens, decimal.Decimal, int, ], - text: typing.Union[MetaOapg.properties.text, str, ], - num_prompt_tokens: typing.Union[MetaOapg.properties.num_prompt_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - tokens: typing.Union[MetaOapg.properties.tokens, list, tuple, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionOutput': - return super().__new__( - cls, - *_args, - num_completion_tokens=num_completion_tokens, - text=text, - num_prompt_tokens=num_prompt_tokens, - tokens=tokens, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.token_output import TokenOutput diff --git a/launch/api_client/model/completion_stream_output.py b/launch/api_client/model/completion_stream_output.py deleted file mode 100644 index 2c673495..00000000 --- a/launch/api_client/model/completion_stream_output.py +++ /dev/null @@ -1,168 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from 
launch.api_client import schemas # noqa: F401 - - -class CompletionStreamOutput( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "finished", - "text", - } - - class properties: - text = schemas.StrSchema - finished = schemas.BoolSchema - - - class num_prompt_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_prompt_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_completion_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_completion_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def token() -> typing.Type['TokenOutput']: - return TokenOutput - __annotations__ = { - "text": text, - "finished": finished, - "num_prompt_tokens": num_prompt_tokens, - "num_completion_tokens": num_completion_tokens, - "token": token, - } - - finished: MetaOapg.properties.finished - text: MetaOapg.properties.text - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["finished"]) -> MetaOapg.properties.finished: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_prompt_tokens"]) -> MetaOapg.properties.num_prompt_tokens: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_completion_tokens"]) -> MetaOapg.properties.num_completion_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["token"]) -> 'TokenOutput': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["text", "finished", "num_prompt_tokens", "num_completion_tokens", "token", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["finished"]) -> MetaOapg.properties.finished: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_prompt_tokens"]) -> typing.Union[MetaOapg.properties.num_prompt_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_completion_tokens"]) -> typing.Union[MetaOapg.properties.num_completion_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> typing.Union['TokenOutput', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["text", "finished", "num_prompt_tokens", "num_completion_tokens", "token", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - finished: typing.Union[MetaOapg.properties.finished, bool, ], - text: typing.Union[MetaOapg.properties.text, str, ], - num_prompt_tokens: typing.Union[MetaOapg.properties.num_prompt_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - num_completion_tokens: typing.Union[MetaOapg.properties.num_completion_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - token: typing.Union['TokenOutput', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionStreamOutput': - return super().__new__( - cls, - *_args, - finished=finished, - text=text, - num_prompt_tokens=num_prompt_tokens, - num_completion_tokens=num_completion_tokens, - token=token, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.token_output import TokenOutput diff --git a/launch/api_client/model/completion_stream_v1_request.py b/launch/api_client/model/completion_stream_v1_request.py deleted file mode 100644 index 8fb5fe58..00000000 --- a/launch/api_client/model/completion_stream_v1_request.py +++ /dev/null @@ -1,505 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime 
import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CompletionStreamV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for a stream prompt completion task. - """ - - - class MetaOapg: - required = { - "max_new_tokens", - "temperature", - "prompt", - } - - class properties: - prompt = schemas.StrSchema - max_new_tokens = schemas.IntSchema - - - class temperature( - schemas.NumberSchema - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - inclusive_minimum = 0.0 - - - class stop_sequences( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stop_sequences': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class return_token_log_probs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'return_token_log_probs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class presence_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'presence_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class frequency_penalty( - 
schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'frequency_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_k( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = -1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_k': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_p': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class include_stop_str_in_output( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'include_stop_str_in_output': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_json( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, 
]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'guided_json': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_regex( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_regex': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_choice( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_choice': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_grammar( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_grammar': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class skip_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, 
- ) - __annotations__ = { - "prompt": prompt, - "max_new_tokens": max_new_tokens, - "temperature": temperature, - "stop_sequences": stop_sequences, - "return_token_log_probs": return_token_log_probs, - "presence_penalty": presence_penalty, - "frequency_penalty": frequency_penalty, - "top_k": top_k, - "top_p": top_p, - "include_stop_str_in_output": include_stop_str_in_output, - "guided_json": guided_json, - "guided_regex": guided_regex, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "skip_special_tokens": skip_special_tokens, - } - - max_new_tokens: MetaOapg.properties.max_new_tokens - temperature: MetaOapg.properties.temperature - prompt: MetaOapg.properties.prompt - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["return_token_log_probs"]) -> MetaOapg.properties.return_token_log_probs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["prompt", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "include_stop_str_in_output", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "skip_special_tokens", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop_sequences"]) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["return_token_log_probs"]) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["prompt", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "include_stop_str_in_output", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "skip_special_tokens", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - max_new_tokens: typing.Union[MetaOapg.properties.max_new_tokens, decimal.Decimal, int, ], - temperature: typing.Union[MetaOapg.properties.temperature, decimal.Decimal, int, float, ], - prompt: typing.Union[MetaOapg.properties.prompt, str, ], - stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, None, schemas.Unset] = schemas.unset, - return_token_log_probs: typing.Union[MetaOapg.properties.return_token_log_probs, None, bool, schemas.Unset] = schemas.unset, - presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, - guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, - guided_choice: 
typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, - skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionStreamV1Request': - return super().__new__( - cls, - *_args, - max_new_tokens=max_new_tokens, - temperature=temperature, - prompt=prompt, - stop_sequences=stop_sequences, - return_token_log_probs=return_token_log_probs, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - top_k=top_k, - top_p=top_p, - include_stop_str_in_output=include_stop_str_in_output, - guided_json=guided_json, - guided_regex=guided_regex, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - skip_special_tokens=skip_special_tokens, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/completion_stream_v1_response.py b/launch/api_client/model/completion_stream_v1_response.py deleted file mode 100644 index 3662050e..00000000 --- a/launch/api_client/model/completion_stream_v1_response.py +++ /dev/null @@ -1,135 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from 
launch.api_client import schemas # noqa: F401 - - -class CompletionStreamV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Error of the response (if any). - """ - - - class MetaOapg: - required = { - "request_id", - } - - class properties: - - - class request_id( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'request_id': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def output() -> typing.Type['CompletionStreamOutput']: - return CompletionStreamOutput - - @staticmethod - def error() -> typing.Type['StreamError']: - return StreamError - __annotations__ = { - "request_id": request_id, - "output": output, - "error": error, - } - - request_id: MetaOapg.properties.request_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output"]) -> 'CompletionStreamOutput': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["request_id", "output", "error", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["output"]) -> typing.Union['CompletionStreamOutput', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> typing.Union['StreamError', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["request_id", "output", "error", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - request_id: typing.Union[MetaOapg.properties.request_id, None, str, ], - output: typing.Union['CompletionStreamOutput', schemas.Unset] = schemas.unset, - error: typing.Union['StreamError', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionStreamV1Response': - return super().__new__( - cls, - *_args, - request_id=request_id, - output=output, - error=error, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.completion_stream_output import ( - CompletionStreamOutput, -) -from launch.api_client.model.stream_error import StreamError diff --git a/launch/api_client/model/completion_sync_v1_request.py b/launch/api_client/model/completion_sync_v1_request.py deleted file mode 100644 index f0e775aa..00000000 --- a/launch/api_client/model/completion_sync_v1_request.py +++ /dev/null @@ -1,505 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import 
date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CompletionSyncV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for a synchronous prompt completion task. - """ - - - class MetaOapg: - required = { - "max_new_tokens", - "temperature", - "prompt", - } - - class properties: - prompt = schemas.StrSchema - max_new_tokens = schemas.IntSchema - - - class temperature( - schemas.NumberSchema - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - inclusive_minimum = 0.0 - - - class stop_sequences( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stop_sequences': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class return_token_log_probs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'return_token_log_probs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class presence_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'presence_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class frequency_penalty( - 
schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'frequency_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_k( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = -1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_k': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_p': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class include_stop_str_in_output( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'include_stop_str_in_output': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_json( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, 
]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'guided_json': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_regex( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_regex': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_choice( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_choice': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_grammar( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_grammar': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class skip_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, 
- ) - __annotations__ = { - "prompt": prompt, - "max_new_tokens": max_new_tokens, - "temperature": temperature, - "stop_sequences": stop_sequences, - "return_token_log_probs": return_token_log_probs, - "presence_penalty": presence_penalty, - "frequency_penalty": frequency_penalty, - "top_k": top_k, - "top_p": top_p, - "include_stop_str_in_output": include_stop_str_in_output, - "guided_json": guided_json, - "guided_regex": guided_regex, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "skip_special_tokens": skip_special_tokens, - } - - max_new_tokens: MetaOapg.properties.max_new_tokens - temperature: MetaOapg.properties.temperature - prompt: MetaOapg.properties.prompt - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["return_token_log_probs"]) -> MetaOapg.properties.return_token_log_probs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["prompt", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "include_stop_str_in_output", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "skip_special_tokens", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop_sequences"]) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["return_token_log_probs"]) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["prompt", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "include_stop_str_in_output", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "skip_special_tokens", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - max_new_tokens: typing.Union[MetaOapg.properties.max_new_tokens, decimal.Decimal, int, ], - temperature: typing.Union[MetaOapg.properties.temperature, decimal.Decimal, int, float, ], - prompt: typing.Union[MetaOapg.properties.prompt, str, ], - stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, None, schemas.Unset] = schemas.unset, - return_token_log_probs: typing.Union[MetaOapg.properties.return_token_log_probs, None, bool, schemas.Unset] = schemas.unset, - presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, - guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, - guided_choice: 
typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, - skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionSyncV1Request': - return super().__new__( - cls, - *_args, - max_new_tokens=max_new_tokens, - temperature=temperature, - prompt=prompt, - stop_sequences=stop_sequences, - return_token_log_probs=return_token_log_probs, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - top_k=top_k, - top_p=top_p, - include_stop_str_in_output=include_stop_str_in_output, - guided_json=guided_json, - guided_regex=guided_regex, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - skip_special_tokens=skip_special_tokens, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/completion_sync_v1_response.py b/launch/api_client/model/completion_sync_v1_response.py deleted file mode 100644 index 491f81bd..00000000 --- a/launch/api_client/model/completion_sync_v1_response.py +++ /dev/null @@ -1,114 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from 
launch.api_client import schemas # noqa: F401 - - -class CompletionSyncV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for a synchronous prompt completion. - """ - - - class MetaOapg: - - class properties: - - - class request_id( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'request_id': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def output() -> typing.Type['CompletionOutput']: - return CompletionOutput - __annotations__ = { - "request_id": request_id, - "output": output, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output"]) -> 'CompletionOutput': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["request_id", "output", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["request_id"]) -> typing.Union[MetaOapg.properties.request_id, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["output"]) -> typing.Union['CompletionOutput', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["request_id", "output", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - request_id: typing.Union[MetaOapg.properties.request_id, None, str, schemas.Unset] = schemas.unset, - output: typing.Union['CompletionOutput', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionSyncV1Response': - return super().__new__( - cls, - *_args, - request_id=request_id, - output=output, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.completion_output import CompletionOutput diff --git a/launch/api_client/model/completion_tokens_details.py b/launch/api_client/model/completion_tokens_details.py deleted file mode 100644 index 77ad3d2c..00000000 --- a/launch/api_client/model/completion_tokens_details.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CompletionTokensDetails( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - accepted_prediction_tokens = schemas.IntSchema - audio_tokens = schemas.IntSchema - reasoning_tokens = schemas.IntSchema - rejected_prediction_tokens = schemas.IntSchema - __annotations__ = { - "accepted_prediction_tokens": accepted_prediction_tokens, - "audio_tokens": audio_tokens, - "reasoning_tokens": reasoning_tokens, - "rejected_prediction_tokens": rejected_prediction_tokens, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["accepted_prediction_tokens"]) -> MetaOapg.properties.accepted_prediction_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["audio_tokens"]) -> MetaOapg.properties.audio_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["reasoning_tokens"]) -> MetaOapg.properties.reasoning_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["rejected_prediction_tokens"]) -> MetaOapg.properties.rejected_prediction_tokens: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["accepted_prediction_tokens", "audio_tokens", "reasoning_tokens", "rejected_prediction_tokens", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["accepted_prediction_tokens"]) -> typing.Union[MetaOapg.properties.accepted_prediction_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["audio_tokens"]) -> typing.Union[MetaOapg.properties.audio_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["reasoning_tokens"]) -> typing.Union[MetaOapg.properties.reasoning_tokens, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["rejected_prediction_tokens"]) -> typing.Union[MetaOapg.properties.rejected_prediction_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["accepted_prediction_tokens", "audio_tokens", "reasoning_tokens", "rejected_prediction_tokens", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - accepted_prediction_tokens: typing.Union[MetaOapg.properties.accepted_prediction_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, - audio_tokens: typing.Union[MetaOapg.properties.audio_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, - reasoning_tokens: typing.Union[MetaOapg.properties.reasoning_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, - rejected_prediction_tokens: typing.Union[MetaOapg.properties.rejected_prediction_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionTokensDetails': - return super().__new__( - cls, - *_args, - accepted_prediction_tokens=accepted_prediction_tokens, - audio_tokens=audio_tokens, - reasoning_tokens=reasoning_tokens, - rejected_prediction_tokens=rejected_prediction_tokens, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/completion_usage.py b/launch/api_client/model/completion_usage.py deleted file mode 100644 index b064817e..00000000 --- a/launch/api_client/model/completion_usage.py +++ /dev/null @@ -1,138 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi 
Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CompletionUsage( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "completion_tokens", - "prompt_tokens", - "total_tokens", - } - - class properties: - completion_tokens = schemas.IntSchema - prompt_tokens = schemas.IntSchema - total_tokens = schemas.IntSchema - - @staticmethod - def completion_tokens_details() -> typing.Type['CompletionTokensDetails']: - return CompletionTokensDetails - - @staticmethod - def prompt_tokens_details() -> typing.Type['PromptTokensDetails']: - return PromptTokensDetails - __annotations__ = { - "completion_tokens": completion_tokens, - "prompt_tokens": prompt_tokens, - "total_tokens": total_tokens, - "completion_tokens_details": completion_tokens_details, - "prompt_tokens_details": prompt_tokens_details, - } - - completion_tokens: MetaOapg.properties.completion_tokens - prompt_tokens: MetaOapg.properties.prompt_tokens - total_tokens: MetaOapg.properties.total_tokens - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["completion_tokens"]) -> MetaOapg.properties.completion_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt_tokens"]) -> MetaOapg.properties.prompt_tokens: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["total_tokens"]) -> MetaOapg.properties.total_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["completion_tokens_details"]) -> 'CompletionTokensDetails': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt_tokens_details"]) -> 'PromptTokensDetails': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["completion_tokens", "prompt_tokens", "total_tokens", "completion_tokens_details", "prompt_tokens_details", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["completion_tokens"]) -> MetaOapg.properties.completion_tokens: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt_tokens"]) -> MetaOapg.properties.prompt_tokens: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["total_tokens"]) -> MetaOapg.properties.total_tokens: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["completion_tokens_details"]) -> typing.Union['CompletionTokensDetails', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt_tokens_details"]) -> typing.Union['PromptTokensDetails', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["completion_tokens", "prompt_tokens", "total_tokens", "completion_tokens_details", "prompt_tokens_details", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - completion_tokens: typing.Union[MetaOapg.properties.completion_tokens, decimal.Decimal, int, ], - prompt_tokens: typing.Union[MetaOapg.properties.prompt_tokens, decimal.Decimal, int, ], - total_tokens: typing.Union[MetaOapg.properties.total_tokens, decimal.Decimal, int, ], - completion_tokens_details: typing.Union['CompletionTokensDetails', schemas.Unset] = schemas.unset, - prompt_tokens_details: typing.Union['PromptTokensDetails', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionUsage': - return super().__new__( - cls, - *_args, - completion_tokens=completion_tokens, - prompt_tokens=prompt_tokens, - total_tokens=total_tokens, - completion_tokens_details=completion_tokens_details, - prompt_tokens_details=prompt_tokens_details, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.completion_tokens_details import ( - CompletionTokensDetails, -) -from launch.api_client.model.prompt_tokens_details import PromptTokensDetails diff --git a/launch/api_client/model/completion_v2_request.py b/launch/api_client/model/completion_v2_request.py deleted file mode 100644 index 674ccb64..00000000 --- a/launch/api_client/model/completion_v2_request.py +++ /dev/null @@ -1,1278 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: 
https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CompletionV2Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "model", - "prompt", - } - - class properties: - model = schemas.StrSchema - - - class prompt( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - - class any_of_1( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'any_of_1': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - Prompt, - Prompt1, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'prompt': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class best_of( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 20 - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'best_of': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_k( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = -1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_k': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_p': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class use_beam_search( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'use_beam_search': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class length_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'length_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class repetition_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'repetition_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class early_stopping( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'early_stopping': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class stop_token_ids( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.IntSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stop_token_ids': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class include_stop_str_in_output( - schemas.BoolBase, - schemas.NoneBase, - 
schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'include_stop_str_in_output': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ignore_eos( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ignore_eos': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class skip_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class spaces_between_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'spaces_between_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class add_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 
'add_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class response_format( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - ResponseFormatText, - ResponseFormatJsonSchema, - ResponseFormatJsonObject, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'response_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_json( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, 
date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'guided_json': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_regex( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_regex': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_choice( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_choice': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_grammar( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_grammar': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_decoding_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_decoding_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_whitespace_pattern( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_whitespace_pattern': - return 
super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class echo( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'echo': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class frequency_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = -2.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'frequency_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class logit_bias( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.IntSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, decimal.Decimal, int, ], - ) -> 'logit_bias': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class logprobs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 5 - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - 
) -> 'logprobs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class n( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 128 - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'n': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class presence_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = -2.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'presence_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class seed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'seed': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def stop() -> typing.Type['StopConfiguration']: - return StopConfiguration - - - class stream( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - 
_configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stream': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def stream_options() -> typing.Type['ChatCompletionStreamOptions']: - return ChatCompletionStreamOptions - - - class suffix( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'suffix': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class temperature( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'temperature': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_p': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class user( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'user': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "model": model, - "prompt": prompt, - "best_of": best_of, - "top_k": top_k, - "min_p": min_p, - "use_beam_search": use_beam_search, - "length_penalty": 
length_penalty, - "repetition_penalty": repetition_penalty, - "early_stopping": early_stopping, - "stop_token_ids": stop_token_ids, - "include_stop_str_in_output": include_stop_str_in_output, - "ignore_eos": ignore_eos, - "min_tokens": min_tokens, - "skip_special_tokens": skip_special_tokens, - "spaces_between_special_tokens": spaces_between_special_tokens, - "add_special_tokens": add_special_tokens, - "response_format": response_format, - "guided_json": guided_json, - "guided_regex": guided_regex, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "guided_decoding_backend": guided_decoding_backend, - "guided_whitespace_pattern": guided_whitespace_pattern, - "echo": echo, - "frequency_penalty": frequency_penalty, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_tokens": max_tokens, - "n": n, - "presence_penalty": presence_penalty, - "seed": seed, - "stop": stop, - "stream": stream, - "stream_options": stream_options, - "suffix": suffix, - "temperature": temperature, - "top_p": top_p, - "user": user, - } - - model: MetaOapg.properties.model - prompt: MetaOapg.properties.prompt - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["best_of"]) -> MetaOapg.properties.best_of: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_p"]) -> MetaOapg.properties.min_p: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["use_beam_search"]) -> MetaOapg.properties.use_beam_search: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["length_penalty"]) -> MetaOapg.properties.length_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repetition_penalty"]) -> MetaOapg.properties.repetition_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["early_stopping"]) -> MetaOapg.properties.early_stopping: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_token_ids"]) -> MetaOapg.properties.stop_token_ids: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ignore_eos"]) -> MetaOapg.properties.ignore_eos: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_tokens"]) -> MetaOapg.properties.min_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> MetaOapg.properties.spaces_between_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["add_special_tokens"]) -> MetaOapg.properties.add_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["response_format"]) -> MetaOapg.properties.response_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> MetaOapg.properties.guided_decoding_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> MetaOapg.properties.guided_whitespace_pattern: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["echo"]) -> MetaOapg.properties.echo: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logit_bias"]) -> MetaOapg.properties.logit_bias: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> MetaOapg.properties.logprobs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_tokens"]) -> MetaOapg.properties.max_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["n"]) -> MetaOapg.properties.n: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop"]) -> 'StopConfiguration': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream"]) -> MetaOapg.properties.stream: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream_options"]) -> 'ChatCompletionStreamOptions': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["user"]) -> MetaOapg.properties.user: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model", "prompt", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "add_special_tokens", "response_format", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "echo", "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "n", "presence_penalty", "seed", "stop", "stream", "stream_options", "suffix", "temperature", "top_p", "user", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["best_of"]) -> typing.Union[MetaOapg.properties.best_of, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_p"]) -> typing.Union[MetaOapg.properties.min_p, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["use_beam_search"]) -> typing.Union[MetaOapg.properties.use_beam_search, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["length_penalty"]) -> typing.Union[MetaOapg.properties.length_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repetition_penalty"]) -> typing.Union[MetaOapg.properties.repetition_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["early_stopping"]) -> typing.Union[MetaOapg.properties.early_stopping, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop_token_ids"]) -> typing.Union[MetaOapg.properties.stop_token_ids, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ignore_eos"]) -> typing.Union[MetaOapg.properties.ignore_eos, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_tokens"]) -> typing.Union[MetaOapg.properties.min_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> typing.Union[MetaOapg.properties.spaces_between_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["add_special_tokens"]) -> typing.Union[MetaOapg.properties.add_special_tokens, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["response_format"]) -> typing.Union[MetaOapg.properties.response_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> typing.Union[MetaOapg.properties.guided_decoding_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.guided_whitespace_pattern, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["echo"]) -> typing.Union[MetaOapg.properties.echo, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logit_bias"]) -> typing.Union[MetaOapg.properties.logit_bias, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union[MetaOapg.properties.logprobs, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_tokens"]) -> typing.Union[MetaOapg.properties.max_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["n"]) -> typing.Union[MetaOapg.properties.n, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop"]) -> typing.Union['StopConfiguration', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream"]) -> typing.Union[MetaOapg.properties.stream, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream_options"]) -> typing.Union['ChatCompletionStreamOptions', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["suffix"]) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> typing.Union[MetaOapg.properties.temperature, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["user"]) -> typing.Union[MetaOapg.properties.user, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model", "prompt", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "add_special_tokens", "response_format", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "echo", "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "n", "presence_penalty", "seed", "stop", "stream", "stream_options", "suffix", "temperature", "top_p", "user", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model: typing.Union[MetaOapg.properties.model, str, ], - prompt: typing.Union[MetaOapg.properties.prompt, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - best_of: typing.Union[MetaOapg.properties.best_of, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - min_p: typing.Union[MetaOapg.properties.min_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - use_beam_search: typing.Union[MetaOapg.properties.use_beam_search, None, bool, schemas.Unset] = schemas.unset, - length_penalty: typing.Union[MetaOapg.properties.length_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - repetition_penalty: typing.Union[MetaOapg.properties.repetition_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - early_stopping: typing.Union[MetaOapg.properties.early_stopping, None, bool, schemas.Unset] = schemas.unset, - stop_token_ids: typing.Union[MetaOapg.properties.stop_token_ids, list, tuple, None, 
schemas.Unset] = schemas.unset, - include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, - ignore_eos: typing.Union[MetaOapg.properties.ignore_eos, None, bool, schemas.Unset] = schemas.unset, - min_tokens: typing.Union[MetaOapg.properties.min_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, - spaces_between_special_tokens: typing.Union[MetaOapg.properties.spaces_between_special_tokens, None, bool, schemas.Unset] = schemas.unset, - add_special_tokens: typing.Union[MetaOapg.properties.add_special_tokens, None, bool, schemas.Unset] = schemas.unset, - response_format: typing.Union[MetaOapg.properties.response_format, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, - guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, - guided_decoding_backend: typing.Union[MetaOapg.properties.guided_decoding_backend, None, str, schemas.Unset] = schemas.unset, - guided_whitespace_pattern: typing.Union[MetaOapg.properties.guided_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, - echo: typing.Union[MetaOapg.properties.echo, None, bool, schemas.Unset] = schemas.unset, - frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - logit_bias: 
typing.Union[MetaOapg.properties.logit_bias, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - logprobs: typing.Union[MetaOapg.properties.logprobs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_tokens: typing.Union[MetaOapg.properties.max_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - n: typing.Union[MetaOapg.properties.n, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - stop: typing.Union['StopConfiguration', schemas.Unset] = schemas.unset, - stream: typing.Union[MetaOapg.properties.stream, None, bool, schemas.Unset] = schemas.unset, - stream_options: typing.Union['ChatCompletionStreamOptions', schemas.Unset] = schemas.unset, - suffix: typing.Union[MetaOapg.properties.suffix, None, str, schemas.Unset] = schemas.unset, - temperature: typing.Union[MetaOapg.properties.temperature, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - user: typing.Union[MetaOapg.properties.user, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionV2Request': - return super().__new__( - cls, - *_args, - model=model, - prompt=prompt, - best_of=best_of, - top_k=top_k, - min_p=min_p, - use_beam_search=use_beam_search, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - early_stopping=early_stopping, - stop_token_ids=stop_token_ids, - include_stop_str_in_output=include_stop_str_in_output, 
- ignore_eos=ignore_eos, - min_tokens=min_tokens, - skip_special_tokens=skip_special_tokens, - spaces_between_special_tokens=spaces_between_special_tokens, - add_special_tokens=add_special_tokens, - response_format=response_format, - guided_json=guided_json, - guided_regex=guided_regex, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - guided_decoding_backend=guided_decoding_backend, - guided_whitespace_pattern=guided_whitespace_pattern, - echo=echo, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - logprobs=logprobs, - max_tokens=max_tokens, - n=n, - presence_penalty=presence_penalty, - seed=seed, - stop=stop, - stream=stream, - stream_options=stream_options, - suffix=suffix, - temperature=temperature, - top_p=top_p, - user=user, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_stream_options import ( - ChatCompletionStreamOptions, -) -from launch.api_client.model.prompt import Prompt -from launch.api_client.model.prompt1 import Prompt1 -from launch.api_client.model.response_format_json_object import ( - ResponseFormatJsonObject, -) -from launch.api_client.model.response_format_json_schema import ( - ResponseFormatJsonSchema, -) -from launch.api_client.model.response_format_text import ResponseFormatText -from launch.api_client.model.stop_configuration import StopConfiguration diff --git a/launch/api_client/model/completion_v2_stream_error_chunk.py b/launch/api_client/model/completion_v2_stream_error_chunk.py deleted file mode 100644 index 085b8335..00000000 --- a/launch/api_client/model/completion_v2_stream_error_chunk.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: 
F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CompletionV2StreamErrorChunk( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "error", - } - - class properties: - - @staticmethod - def error() -> typing.Type['StreamError']: - return StreamError - __annotations__ = { - "error": error, - } - - error: 'StreamError' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["error", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["error", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - error: 'StreamError', - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CompletionV2StreamErrorChunk': - return super().__new__( - cls, - *_args, - error=error, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.stream_error import StreamError diff --git a/launch/api_client/model/content.py b/launch/api_client/model/content.py deleted file mode 100644 index d444926a..00000000 --- a/launch/api_client/model/content.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Content( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. 
- """ - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionRequestAssistantMessageContentPart']: - return ChatCompletionRequestAssistantMessageContentPart - min_items = 1 - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'Content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - -from launch.api_client.model.chat_completion_request_assistant_message_content_part import ( - ChatCompletionRequestAssistantMessageContentPart, -) diff --git a/launch/api_client/model/content1.py b/launch/api_client/model/content1.py deleted file mode 100644 index ccd7efd3..00000000 --- a/launch/api_client/model/content1.py +++ /dev/null @@ -1,61 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Content1( - schemas.ListSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An array of content parts with a defined type. For developer messages, only type `text` is supported. 
- """ - - - class MetaOapg: - min_items = 1 - - @staticmethod - def items() -> typing.Type['ChatCompletionRequestMessageContentPartText']: - return ChatCompletionRequestMessageContentPartText - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessageContentPartText'], typing.List['ChatCompletionRequestMessageContentPartText']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'Content1': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ChatCompletionRequestMessageContentPartText': - return super().__getitem__(i) - -from launch.api_client.model.chat_completion_request_message_content_part_text import ( - ChatCompletionRequestMessageContentPartText, -) diff --git a/launch/api_client/model/content2.py b/launch/api_client/model/content2.py deleted file mode 100644 index c103bcb7..00000000 --- a/launch/api_client/model/content2.py +++ /dev/null @@ -1,61 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Content2( - schemas.ListSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An array of content parts with a defined type. For system messages, only type `text` is supported. 
- """ - - - class MetaOapg: - min_items = 1 - - @staticmethod - def items() -> typing.Type['ChatCompletionRequestMessageContentPartText']: - return ChatCompletionRequestMessageContentPartText - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessageContentPartText'], typing.List['ChatCompletionRequestMessageContentPartText']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'Content2': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ChatCompletionRequestMessageContentPartText': - return super().__getitem__(i) - -from launch.api_client.model.chat_completion_request_message_content_part_text import ( - ChatCompletionRequestMessageContentPartText, -) diff --git a/launch/api_client/model/content3.py b/launch/api_client/model/content3.py deleted file mode 100644 index 95d71f60..00000000 --- a/launch/api_client/model/content3.py +++ /dev/null @@ -1,61 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Content3( - schemas.ListSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An array of content parts with a defined type. For tool messages, only type `text` is supported. 
- """ - - - class MetaOapg: - min_items = 1 - - @staticmethod - def items() -> typing.Type['ChatCompletionRequestMessageContentPartText']: - return ChatCompletionRequestMessageContentPartText - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessageContentPartText'], typing.List['ChatCompletionRequestMessageContentPartText']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'Content3': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ChatCompletionRequestMessageContentPartText': - return super().__getitem__(i) - -from launch.api_client.model.chat_completion_request_message_content_part_text import ( - ChatCompletionRequestMessageContentPartText, -) diff --git a/launch/api_client/model/content4.py b/launch/api_client/model/content4.py deleted file mode 100644 index 313ebc22..00000000 --- a/launch/api_client/model/content4.py +++ /dev/null @@ -1,61 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Content4( - schemas.ListSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs. 
- """ - - - class MetaOapg: - min_items = 1 - - @staticmethod - def items() -> typing.Type['ChatCompletionRequestUserMessageContentPart']: - return ChatCompletionRequestUserMessageContentPart - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ChatCompletionRequestUserMessageContentPart'], typing.List['ChatCompletionRequestUserMessageContentPart']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'Content4': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ChatCompletionRequestUserMessageContentPart': - return super().__getitem__(i) - -from launch.api_client.model.chat_completion_request_user_message_content_part import ( - ChatCompletionRequestUserMessageContentPart, -) diff --git a/launch/api_client/model/content8.py b/launch/api_client/model/content8.py deleted file mode 100644 index 9edcdaef..00000000 --- a/launch/api_client/model/content8.py +++ /dev/null @@ -1,61 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Content8( - schemas.ListSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs. 
- """ - - - class MetaOapg: - min_items = 1 - - @staticmethod - def items() -> typing.Type['ChatCompletionRequestMessageContentPartText']: - return ChatCompletionRequestMessageContentPartText - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessageContentPartText'], typing.List['ChatCompletionRequestMessageContentPartText']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'Content8': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ChatCompletionRequestMessageContentPartText': - return super().__getitem__(i) - -from launch.api_client.model.chat_completion_request_message_content_part_text import ( - ChatCompletionRequestMessageContentPartText, -) diff --git a/launch/api_client/model/create_async_task_v1_response.py b/launch/api_client/model/create_async_task_v1_response.py deleted file mode 100644 index 87e7482b..00000000 --- a/launch/api_client/model/create_async_task_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateAsyncTaskV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "task_id", - } - - class properties: - task_id = schemas.StrSchema - __annotations__ = { - "task_id": task_id, - } - - task_id: MetaOapg.properties.task_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["task_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["task_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - task_id: typing.Union[MetaOapg.properties.task_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateAsyncTaskV1Response': - return super().__new__( - cls, - *_args, - task_id=task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_batch_completions_model_config.py b/launch/api_client/model/create_batch_completions_model_config.py deleted file mode 100644 index 73ec228c..00000000 --- a/launch/api_client/model/create_batch_completions_model_config.py +++ /dev/null @@ -1,247 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: 
https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchCompletionsModelConfig(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "model", - "labels", - } - - class properties: - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[ - str, - ], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[ - str, - ], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - model = schemas.StrSchema - checkpoint_path = schemas.StrSchema - num_shards = schemas.IntSchema - - @staticmethod - def quantize() -> typing.Type["Quantization"]: - return Quantization - - seed = schemas.IntSchema - __annotations__ = { - "labels": labels, - "model": model, - "checkpoint_path": checkpoint_path, - "num_shards": num_shards, - "quantize": quantize, - "seed": seed, - } - - model: MetaOapg.properties.model - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> "Quantization": - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "model", - "checkpoint_path", - "num_shards", - "quantize", - "seed", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_shards"] - ) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union["Quantization", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["seed"] - ) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: - ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "model", - "checkpoint_path", - "num_shards", - "quantize", - "seed", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model: typing.Union[ - MetaOapg.properties.model, - str, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - quantize: typing.Union["Quantization", schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchCompletionsModelConfig": - return super().__new__( - cls, - *_args, - model=model, - labels=labels, - checkpoint_path=checkpoint_path, - num_shards=num_shards, - quantize=quantize, - seed=seed, - _configuration=_configuration, - **kwargs, - ) - - -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_batch_completions_request.py b/launch/api_client/model/create_batch_completions_request.py deleted file mode 100644 index 746b7ea9..00000000 --- a/launch/api_client/model/create_batch_completions_request.py +++ /dev/null @@ -1,251 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchCompletionsRequest(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for batch completions. - """ - - class MetaOapg: - required = { - "model_config", - "output_data_path", - } - - class properties: - @staticmethod - def model_config() -> typing.Type["CreateBatchCompletionsModelConfig"]: - return CreateBatchCompletionsModelConfig - - output_data_path = schemas.StrSchema - - @staticmethod - def content() -> typing.Type["CreateBatchCompletionsRequestContent"]: - return CreateBatchCompletionsRequestContent - - class data_parallelism(schemas.IntSchema): - class MetaOapg: - inclusive_maximum = 64 - inclusive_minimum = 1 - - input_data_path = schemas.StrSchema - - class max_runtime_sec(schemas.IntSchema): - class MetaOapg: - inclusive_maximum = 172800 - inclusive_minimum = 1 - - @staticmethod - def tool_config() -> typing.Type["ToolConfig"]: - return ToolConfig - - __annotations__ = { - "model_config": model_config, - "output_data_path": output_data_path, - "content": content, - "data_parallelism": data_parallelism, - "input_data_path": input_data_path, - "max_runtime_sec": max_runtime_sec, - "tool_config": tool_config, - } - - model_config: "CreateBatchCompletionsModelConfig" - output_data_path: MetaOapg.properties.output_data_path - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> 
"CreateBatchCompletionsModelConfig": - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> "CreateBatchCompletionsRequestContent": - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["data_parallelism"]) -> MetaOapg.properties.data_parallelism: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["input_data_path"]) -> MetaOapg.properties.input_data_path: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_runtime_sec"]) -> MetaOapg.properties.max_runtime_sec: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_config"]) -> "ToolConfig": - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "model_config", - "output_data_path", - "content", - "data_parallelism", - "input_data_path", - "max_runtime_sec", - "tool_config", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> "CreateBatchCompletionsModelConfig": - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["output_data_path"] - ) -> MetaOapg.properties.output_data_path: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["content"] - ) -> typing.Union["CreateBatchCompletionsRequestContent", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["data_parallelism"] - ) -> typing.Union[MetaOapg.properties.data_parallelism, schemas.Unset]: - ... 
- - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["input_data_path"] - ) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_runtime_sec"] - ) -> typing.Union[MetaOapg.properties.max_runtime_sec, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["tool_config"] - ) -> typing.Union["ToolConfig", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "model_config", - "output_data_path", - "content", - "data_parallelism", - "input_data_path", - "max_runtime_sec", - "tool_config", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_config: "CreateBatchCompletionsModelConfig", - output_data_path: typing.Union[ - MetaOapg.properties.output_data_path, - str, - ], - content: typing.Union["CreateBatchCompletionsRequestContent", schemas.Unset] = schemas.unset, - data_parallelism: typing.Union[ - MetaOapg.properties.data_parallelism, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - input_data_path: typing.Union[MetaOapg.properties.input_data_path, str, schemas.Unset] = schemas.unset, - max_runtime_sec: typing.Union[ - MetaOapg.properties.max_runtime_sec, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - tool_config: typing.Union["ToolConfig", schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchCompletionsRequest": - return 
super().__new__( - cls, - *_args, - model_config=model_config, - output_data_path=output_data_path, - content=content, - data_parallelism=data_parallelism, - input_data_path=input_data_path, - max_runtime_sec=max_runtime_sec, - tool_config=tool_config, - _configuration=_configuration, - **kwargs, - ) - - -from launch.api_client.model.create_batch_completions_model_config import ( - CreateBatchCompletionsModelConfig, -) -from launch.api_client.model.create_batch_completions_request_content import ( - CreateBatchCompletionsRequestContent, -) -from launch.api_client.model.tool_config import ToolConfig diff --git a/launch/api_client/model/create_batch_completions_request_content.py b/launch/api_client/model/create_batch_completions_request_content.py deleted file mode 100644 index c1e055be..00000000 --- a/launch/api_client/model/create_batch_completions_request_content.py +++ /dev/null @@ -1,347 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchCompletionsRequestContent(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "max_new_tokens", - "temperature", - "prompts", - } - - class properties: - max_new_tokens = schemas.IntSchema - - class prompts(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "prompts": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class temperature(schemas.NumberSchema): - class MetaOapg: - inclusive_maximum = 1.0 - inclusive_minimum = 0.0 - - class frequency_penalty(schemas.NumberSchema): - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - class presence_penalty(schemas.NumberSchema): - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - return_token_log_probs = schemas.BoolSchema - - class stop_sequences(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "stop_sequences": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class top_k(schemas.IntSchema): - class MetaOapg: - inclusive_minimum = -1 - - class top_p(schemas.NumberSchema): - class MetaOapg: - inclusive_maximum = 1.0 - - __annotations__ = { - "max_new_tokens": max_new_tokens, - "prompts": prompts, - "temperature": temperature, - "frequency_penalty": frequency_penalty, - "presence_penalty": presence_penalty, - "return_token_log_probs": 
return_token_log_probs, - "stop_sequences": stop_sequences, - "top_k": top_k, - "top_p": top_p, - } - - max_new_tokens: MetaOapg.properties.max_new_tokens - temperature: MetaOapg.properties.temperature - prompts: MetaOapg.properties.prompts - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompts"]) -> MetaOapg.properties.prompts: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: - ... - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> MetaOapg.properties.frequency_penalty: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: - ... - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> MetaOapg.properties.return_token_log_probs: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompts", - "temperature", - "frequency_penalty", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompts"]) -> MetaOapg.properties.prompts: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["presence_penalty"] - ) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["stop_sequences"] - ) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_k"] - ) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_p"] - ) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompts", - "temperature", - "frequency_penalty", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - max_new_tokens: typing.Union[ - MetaOapg.properties.max_new_tokens, - decimal.Decimal, - int, - ], - temperature: typing.Union[ - MetaOapg.properties.temperature, - decimal.Decimal, - int, - float, - ], - prompts: typing.Union[ - MetaOapg.properties.prompts, - list, - tuple, - ], - frequency_penalty: typing.Union[ - MetaOapg.properties.frequency_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - presence_penalty: typing.Union[ - MetaOapg.properties.presence_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - return_token_log_probs: typing.Union[ - MetaOapg.properties.return_token_log_probs, bool, schemas.Unset - ] = schemas.unset, - stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchCompletionsRequestContent": - return super().__new__( - cls, - *_args, - max_new_tokens=max_new_tokens, - temperature=temperature, - prompts=prompts, - frequency_penalty=frequency_penalty, - presence_penalty=presence_penalty, - return_token_log_probs=return_token_log_probs, - stop_sequences=stop_sequences, - 
top_k=top_k, - top_p=top_p, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_batch_completions_response.py b/launch/api_client/model/create_batch_completions_response.py deleted file mode 100644 index 26631968..00000000 --- a/launch/api_client/model/create_batch_completions_response.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchCompletionsResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "job_id", - } - - class properties: - job_id = schemas.StrSchema - __annotations__ = { - "job_id": job_id, - } - - job_id: MetaOapg.properties.job_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "job_id", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: - ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "job_id", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - job_id: typing.Union[ - MetaOapg.properties.job_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchCompletionsResponse": - return super().__new__( - cls, - *_args, - job_id=job_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_batch_completions_v1_model_config.py b/launch/api_client/model/create_batch_completions_v1_model_config.py deleted file mode 100644 index 08ec16ae..00000000 --- a/launch/api_client/model/create_batch_completions_v1_model_config.py +++ /dev/null @@ -1,1328 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchCompletionsV1ModelConfig( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "model", - } - - class properties: - model = schemas.StrSchema - - - class max_model_len( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_model_len': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_num_seqs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_num_seqs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enforce_eager( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enforce_eager': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class trust_remote_code( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'trust_remote_code': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class pipeline_parallel_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'pipeline_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tensor_parallel_size( - schemas.IntBase, - schemas.NoneBase, - 
schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tensor_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_log_requests( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_log_requests': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tool_call_parser( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tool_call_parser': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_auto_tool_choice( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_auto_tool_choice': - return 
super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class load_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'load_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class config_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'config_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokenizer_mode( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer_mode': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class limit_mm_per_prompt( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'limit_mm_per_prompt': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_num_batched_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_num_batched_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokenizer( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: 
typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class seed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'seed': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class code_revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'code_revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class rope_scaling( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return 
super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'rope_scaling': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class tokenizer_revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer_revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization_param_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization_param_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_seq_len_to_capture( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_seq_len_to_capture': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_sliding_window( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_sliding_window': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class 
skip_tokenizer_init( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_tokenizer_init': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class served_model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'served_model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class override_neuron_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'override_neuron_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class mm_processor_kwargs( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return 
super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'mm_processor_kwargs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class block_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'block_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class gpu_memory_utilization( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpu_memory_utilization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class swap_space( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'swap_space': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cache_dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) 
-> 'cache_dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_gpu_blocks_override( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_gpu_blocks_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_prefix_caching( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_prefix_caching': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_shards( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_shards': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_context_length( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_context_length': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class response_role( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'response_role': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "model": model, - "max_model_len": max_model_len, - "max_num_seqs": max_num_seqs, - "enforce_eager": enforce_eager, - "trust_remote_code": trust_remote_code, - "pipeline_parallel_size": pipeline_parallel_size, - "tensor_parallel_size": tensor_parallel_size, - "quantization": quantization, - "disable_log_requests": disable_log_requests, - "chat_template": chat_template, - "tool_call_parser": tool_call_parser, - "enable_auto_tool_choice": enable_auto_tool_choice, - "load_format": load_format, - "config_format": config_format, - "tokenizer_mode": tokenizer_mode, - "limit_mm_per_prompt": limit_mm_per_prompt, - "max_num_batched_tokens": max_num_batched_tokens, - "tokenizer": tokenizer, - "dtype": dtype, - "seed": seed, - "revision": revision, - "code_revision": code_revision, - "rope_scaling": rope_scaling, - "tokenizer_revision": tokenizer_revision, - 
"quantization_param_path": quantization_param_path, - "max_seq_len_to_capture": max_seq_len_to_capture, - "disable_sliding_window": disable_sliding_window, - "skip_tokenizer_init": skip_tokenizer_init, - "served_model_name": served_model_name, - "override_neuron_config": override_neuron_config, - "mm_processor_kwargs": mm_processor_kwargs, - "block_size": block_size, - "gpu_memory_utilization": gpu_memory_utilization, - "swap_space": swap_space, - "cache_dtype": cache_dtype, - "num_gpu_blocks_override": num_gpu_blocks_override, - "enable_prefix_caching": enable_prefix_caching, - "checkpoint_path": checkpoint_path, - "num_shards": num_shards, - "max_context_length": max_context_length, - "response_role": response_role, - "labels": labels, - } - - model: MetaOapg.properties.model - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_model_len"]) -> MetaOapg.properties.max_model_len: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_num_seqs"]) -> MetaOapg.properties.max_num_seqs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enforce_eager"]) -> MetaOapg.properties.enforce_eager: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> MetaOapg.properties.pipeline_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> MetaOapg.properties.tensor_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_log_requests"]) -> MetaOapg.properties.disable_log_requests: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> MetaOapg.properties.enable_auto_tool_choice: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["config_format"]) -> MetaOapg.properties.config_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer_mode"]) -> MetaOapg.properties.tokenizer_mode: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> MetaOapg.properties.limit_mm_per_prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> MetaOapg.properties.max_num_batched_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer"]) -> MetaOapg.properties.tokenizer: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["code_revision"]) -> MetaOapg.properties.code_revision: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["rope_scaling"]) -> MetaOapg.properties.rope_scaling: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer_revision"]) -> MetaOapg.properties.tokenizer_revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> MetaOapg.properties.max_seq_len_to_capture: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_sliding_window"]) -> MetaOapg.properties.disable_sliding_window: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["override_neuron_config"]) -> MetaOapg.properties.override_neuron_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> MetaOapg.properties.mm_processor_kwargs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["block_size"]) -> MetaOapg.properties.block_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> MetaOapg.properties.gpu_memory_utilization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["swap_space"]) -> MetaOapg.properties.swap_space: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cache_dtype"]) -> MetaOapg.properties.cache_dtype: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> MetaOapg.properties.num_gpu_blocks_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> MetaOapg.properties.enable_prefix_caching: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_context_length"]) -> MetaOapg.properties.max_context_length: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["response_role"]) -> MetaOapg.properties.response_role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "checkpoint_path", "num_shards", "max_context_length", "response_role", "labels", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_model_len"]) -> typing.Union[MetaOapg.properties.max_model_len, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_num_seqs"]) -> typing.Union[MetaOapg.properties.max_num_seqs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enforce_eager"]) -> typing.Union[MetaOapg.properties.enforce_eager, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> typing.Union[MetaOapg.properties.pipeline_parallel_size, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> typing.Union[MetaOapg.properties.tensor_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_log_requests"]) -> typing.Union[MetaOapg.properties.disable_log_requests, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> typing.Union[MetaOapg.properties.enable_auto_tool_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["config_format"]) -> typing.Union[MetaOapg.properties.config_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_mode"]) -> typing.Union[MetaOapg.properties.tokenizer_mode, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> typing.Union[MetaOapg.properties.limit_mm_per_prompt, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> typing.Union[MetaOapg.properties.max_num_batched_tokens, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer"]) -> typing.Union[MetaOapg.properties.tokenizer, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["code_revision"]) -> typing.Union[MetaOapg.properties.code_revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["rope_scaling"]) -> typing.Union[MetaOapg.properties.rope_scaling, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_revision"]) -> typing.Union[MetaOapg.properties.tokenizer_revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> typing.Union[MetaOapg.properties.max_seq_len_to_capture, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_sliding_window"]) -> typing.Union[MetaOapg.properties.disable_sliding_window, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["override_neuron_config"]) -> typing.Union[MetaOapg.properties.override_neuron_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> typing.Union[MetaOapg.properties.mm_processor_kwargs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["block_size"]) -> typing.Union[MetaOapg.properties.block_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.gpu_memory_utilization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["swap_space"]) -> typing.Union[MetaOapg.properties.swap_space, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cache_dtype"]) -> typing.Union[MetaOapg.properties.cache_dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> typing.Union[MetaOapg.properties.num_gpu_blocks_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> typing.Union[MetaOapg.properties.enable_prefix_caching, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_context_length"]) -> typing.Union[MetaOapg.properties.max_context_length, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["response_role"]) -> typing.Union[MetaOapg.properties.response_role, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "checkpoint_path", "num_shards", "max_context_length", "response_role", "labels", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model: typing.Union[MetaOapg.properties.model, str, ], - max_model_len: typing.Union[MetaOapg.properties.max_model_len, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_num_seqs: typing.Union[MetaOapg.properties.max_num_seqs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enforce_eager: typing.Union[MetaOapg.properties.enforce_eager, 
None, bool, schemas.Unset] = schemas.unset, - trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, - pipeline_parallel_size: typing.Union[MetaOapg.properties.pipeline_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - tensor_parallel_size: typing.Union[MetaOapg.properties.tensor_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, - disable_log_requests: typing.Union[MetaOapg.properties.disable_log_requests, None, bool, schemas.Unset] = schemas.unset, - chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, - tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, - enable_auto_tool_choice: typing.Union[MetaOapg.properties.enable_auto_tool_choice, None, bool, schemas.Unset] = schemas.unset, - load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, - config_format: typing.Union[MetaOapg.properties.config_format, None, str, schemas.Unset] = schemas.unset, - tokenizer_mode: typing.Union[MetaOapg.properties.tokenizer_mode, None, str, schemas.Unset] = schemas.unset, - limit_mm_per_prompt: typing.Union[MetaOapg.properties.limit_mm_per_prompt, None, str, schemas.Unset] = schemas.unset, - max_num_batched_tokens: typing.Union[MetaOapg.properties.max_num_batched_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - tokenizer: typing.Union[MetaOapg.properties.tokenizer, None, str, schemas.Unset] = schemas.unset, - dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = 
schemas.unset, - code_revision: typing.Union[MetaOapg.properties.code_revision, None, str, schemas.Unset] = schemas.unset, - rope_scaling: typing.Union[MetaOapg.properties.rope_scaling, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - tokenizer_revision: typing.Union[MetaOapg.properties.tokenizer_revision, None, str, schemas.Unset] = schemas.unset, - quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, - max_seq_len_to_capture: typing.Union[MetaOapg.properties.max_seq_len_to_capture, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - disable_sliding_window: typing.Union[MetaOapg.properties.disable_sliding_window, None, bool, schemas.Unset] = schemas.unset, - skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, - served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, - override_neuron_config: typing.Union[MetaOapg.properties.override_neuron_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - mm_processor_kwargs: typing.Union[MetaOapg.properties.mm_processor_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - block_size: typing.Union[MetaOapg.properties.block_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - gpu_memory_utilization: typing.Union[MetaOapg.properties.gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - swap_space: typing.Union[MetaOapg.properties.swap_space, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - cache_dtype: typing.Union[MetaOapg.properties.cache_dtype, None, str, schemas.Unset] = schemas.unset, - num_gpu_blocks_override: typing.Union[MetaOapg.properties.num_gpu_blocks_override, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enable_prefix_caching: 
typing.Union[MetaOapg.properties.enable_prefix_caching, None, bool, schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_context_length: typing.Union[MetaOapg.properties.max_context_length, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - response_role: typing.Union[MetaOapg.properties.response_role, None, str, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateBatchCompletionsV1ModelConfig': - return super().__new__( - cls, - *_args, - model=model, - max_model_len=max_model_len, - max_num_seqs=max_num_seqs, - enforce_eager=enforce_eager, - trust_remote_code=trust_remote_code, - pipeline_parallel_size=pipeline_parallel_size, - tensor_parallel_size=tensor_parallel_size, - quantization=quantization, - disable_log_requests=disable_log_requests, - chat_template=chat_template, - tool_call_parser=tool_call_parser, - enable_auto_tool_choice=enable_auto_tool_choice, - load_format=load_format, - config_format=config_format, - tokenizer_mode=tokenizer_mode, - limit_mm_per_prompt=limit_mm_per_prompt, - max_num_batched_tokens=max_num_batched_tokens, - tokenizer=tokenizer, - dtype=dtype, - seed=seed, - revision=revision, - code_revision=code_revision, - rope_scaling=rope_scaling, - tokenizer_revision=tokenizer_revision, - quantization_param_path=quantization_param_path, - max_seq_len_to_capture=max_seq_len_to_capture, - disable_sliding_window=disable_sliding_window, - skip_tokenizer_init=skip_tokenizer_init, - 
served_model_name=served_model_name, - override_neuron_config=override_neuron_config, - mm_processor_kwargs=mm_processor_kwargs, - block_size=block_size, - gpu_memory_utilization=gpu_memory_utilization, - swap_space=swap_space, - cache_dtype=cache_dtype, - num_gpu_blocks_override=num_gpu_blocks_override, - enable_prefix_caching=enable_prefix_caching, - checkpoint_path=checkpoint_path, - num_shards=num_shards, - max_context_length=max_context_length, - response_role=response_role, - labels=labels, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_batch_completions_v1_request.py b/launch/api_client/model/create_batch_completions_v1_request.py deleted file mode 100644 index f2e00cdb..00000000 --- a/launch/api_client/model/create_batch_completions_v1_request.py +++ /dev/null @@ -1,520 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchCompletionsV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for batch completions. 
- """ - - - class MetaOapg: - required = { - "model_config", - "output_data_path", - } - - class properties: - output_data_path = schemas.StrSchema - - @staticmethod - def model_config() -> typing.Type['CreateBatchCompletionsV1ModelConfig']: - return CreateBatchCompletionsV1ModelConfig - - - class input_data_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'input_data_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class data_parallelism( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 64 - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'data_parallelism': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_runtime_sec( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 172800 - inclusive_minimum = 1 - - - def 
__new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_runtime_sec': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class priority( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def tool_config() -> typing.Type['ToolConfig']: - return ToolConfig - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def content() -> typing.Type['CreateBatchCompletionsV1RequestContent']: - return CreateBatchCompletionsV1RequestContent - __annotations__ = { - "output_data_path": output_data_path, - "model_config": model_config, - "input_data_path": input_data_path, - "labels": labels, - "data_parallelism": data_parallelism, - "max_runtime_sec": max_runtime_sec, - "priority": priority, - "tool_config": tool_config, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "content": content, - } - - model_config: 'CreateBatchCompletionsV1ModelConfig' - output_data_path: MetaOapg.properties.output_data_path - - @typing.overload - def __getitem__(self, name: 
typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> 'CreateBatchCompletionsV1ModelConfig': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["input_data_path"]) -> MetaOapg.properties.input_data_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["data_parallelism"]) -> MetaOapg.properties.data_parallelism: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_runtime_sec"]) -> MetaOapg.properties.max_runtime_sec: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_config"]) -> 'ToolConfig': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> 'CreateBatchCompletionsV1RequestContent': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["output_data_path", "model_config", "input_data_path", "labels", "data_parallelism", "max_runtime_sec", "priority", "tool_config", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "content", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> 'CreateBatchCompletionsV1ModelConfig': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["input_data_path"]) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["data_parallelism"]) -> typing.Union[MetaOapg.properties.data_parallelism, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_runtime_sec"]) -> typing.Union[MetaOapg.properties.max_runtime_sec, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_config"]) -> typing.Union['ToolConfig', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union['CreateBatchCompletionsV1RequestContent', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["output_data_path", "model_config", "input_data_path", "labels", "data_parallelism", "max_runtime_sec", "priority", "tool_config", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "content", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_config: 'CreateBatchCompletionsV1ModelConfig', - output_data_path: typing.Union[MetaOapg.properties.output_data_path, str, ], - input_data_path: typing.Union[MetaOapg.properties.input_data_path, None, str, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - data_parallelism: typing.Union[MetaOapg.properties.data_parallelism, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_runtime_sec: typing.Union[MetaOapg.properties.max_runtime_sec, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - priority: typing.Union[MetaOapg.properties.priority, 
None, str, schemas.Unset] = schemas.unset, - tool_config: typing.Union['ToolConfig', schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - content: typing.Union['CreateBatchCompletionsV1RequestContent', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateBatchCompletionsV1Request': - return super().__new__( - cls, - *_args, - model_config=model_config, - output_data_path=output_data_path, - input_data_path=input_data_path, - labels=labels, - data_parallelism=data_parallelism, - max_runtime_sec=max_runtime_sec, - priority=priority, - tool_config=tool_config, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - content=content, - _configuration=_configuration, - **kwargs, - ) - -from 
launch.api_client.model.create_batch_completions_v1_model_config import ( - CreateBatchCompletionsV1ModelConfig, -) -from launch.api_client.model.create_batch_completions_v1_request_content import ( - CreateBatchCompletionsV1RequestContent, -) -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.tool_config import ToolConfig diff --git a/launch/api_client/model/create_batch_completions_v1_request_content.py b/launch/api_client/model/create_batch_completions_v1_request_content.py deleted file mode 100644 index 9501d737..00000000 --- a/launch/api_client/model/create_batch_completions_v1_request_content.py +++ /dev/null @@ -1,363 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchCompletionsV1RequestContent( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "max_new_tokens", - "temperature", - "prompts", - } - - class properties: - - - class prompts( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prompts': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - max_new_tokens = schemas.IntSchema - - - class temperature( - schemas.NumberSchema - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - inclusive_minimum = 0.0 - - - class stop_sequences( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stop_sequences': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class return_token_log_probs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'return_token_log_probs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class presence_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'presence_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) 
- - - class frequency_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'frequency_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_k( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = -1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_k': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_p': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class skip_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "prompts": prompts, - "max_new_tokens": max_new_tokens, - "temperature": temperature, - "stop_sequences": stop_sequences, - "return_token_log_probs": return_token_log_probs, - "presence_penalty": presence_penalty, - "frequency_penalty": frequency_penalty, - "top_k": top_k, - "top_p": top_p, - "skip_special_tokens": skip_special_tokens, - } - - 
max_new_tokens: MetaOapg.properties.max_new_tokens - temperature: MetaOapg.properties.temperature - prompts: MetaOapg.properties.prompts - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompts"]) -> MetaOapg.properties.prompts: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["return_token_log_probs"]) -> MetaOapg.properties.return_token_log_probs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["prompts", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "skip_special_tokens", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompts"]) -> MetaOapg.properties.prompts: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop_sequences"]) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["return_token_log_probs"]) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["prompts", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "skip_special_tokens", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - max_new_tokens: typing.Union[MetaOapg.properties.max_new_tokens, decimal.Decimal, int, ], - temperature: typing.Union[MetaOapg.properties.temperature, decimal.Decimal, int, float, ], - prompts: typing.Union[MetaOapg.properties.prompts, list, tuple, ], - stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, None, schemas.Unset] = schemas.unset, - return_token_log_probs: typing.Union[MetaOapg.properties.return_token_log_probs, None, bool, schemas.Unset] = schemas.unset, - presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateBatchCompletionsV1RequestContent': - return super().__new__( - cls, - *_args, - max_new_tokens=max_new_tokens, - temperature=temperature, - 
prompts=prompts, - stop_sequences=stop_sequences, - return_token_log_probs=return_token_log_probs, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - top_k=top_k, - top_p=top_p, - skip_special_tokens=skip_special_tokens, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_batch_completions_v1_response.py b/launch/api_client/model/create_batch_completions_v1_response.py deleted file mode 100644 index 93385a5d..00000000 --- a/launch/api_client/model/create_batch_completions_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchCompletionsV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "job_id", - } - - class properties: - job_id = schemas.StrSchema - __annotations__ = { - "job_id": job_id, - } - - job_id: MetaOapg.properties.job_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - job_id: typing.Union[MetaOapg.properties.job_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateBatchCompletionsV1Response': - return super().__new__( - cls, - *_args, - job_id=job_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_batch_completions_v2_request.py b/launch/api_client/model/create_batch_completions_v2_request.py deleted file mode 100644 index efd81e88..00000000 --- a/launch/api_client/model/create_batch_completions_v2_request.py +++ /dev/null @@ -1,612 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchCompletionsV2Request( - 
schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for batch completions. - """ - - - class MetaOapg: - required = { - "model_config", - "output_data_path", - } - - class properties: - output_data_path = schemas.StrSchema - - @staticmethod - def model_config() -> typing.Type['BatchCompletionsModelConfig']: - return BatchCompletionsModelConfig - - - class input_data_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'input_data_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class data_parallelism( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 64 - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'data_parallelism': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class 
max_runtime_sec( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 172800 - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_runtime_sec': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class priority( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def tool_config() -> typing.Type['ToolConfig']: - return ToolConfig - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class content( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - - class any_of_1( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['FilteredCompletionV2Request']: - return FilteredCompletionV2Request - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['FilteredCompletionV2Request'], typing.List['FilteredCompletionV2Request']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'any_of_1': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'FilteredCompletionV2Request': - return super().__getitem__(i) - - - class any_of_2( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> 
typing.Type['FilteredChatCompletionV2Request']: - return FilteredChatCompletionV2Request - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['FilteredChatCompletionV2Request'], typing.List['FilteredChatCompletionV2Request']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'any_of_2': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'FilteredChatCompletionV2Request': - return super().__getitem__(i) - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CreateBatchCompletionsV1RequestContent, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "output_data_path": output_data_path, - "model_config": model_config, - "input_data_path": input_data_path, - "labels": labels, - "data_parallelism": data_parallelism, - "max_runtime_sec": max_runtime_sec, - "priority": priority, - "tool_config": tool_config, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": 
nodes_per_worker, - "content": content, - } - - model_config: 'BatchCompletionsModelConfig' - output_data_path: MetaOapg.properties.output_data_path - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["input_data_path"]) -> MetaOapg.properties.input_data_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["data_parallelism"]) -> MetaOapg.properties.data_parallelism: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_runtime_sec"]) -> MetaOapg.properties.max_runtime_sec: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_config"]) -> 'ToolConfig': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["output_data_path", "model_config", "input_data_path", "labels", "data_parallelism", "max_runtime_sec", "priority", "tool_config", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "content", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["input_data_path"]) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["data_parallelism"]) -> typing.Union[MetaOapg.properties.data_parallelism, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_runtime_sec"]) -> typing.Union[MetaOapg.properties.max_runtime_sec, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_config"]) -> typing.Union['ToolConfig', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["output_data_path", "model_config", "input_data_path", "labels", "data_parallelism", "max_runtime_sec", "priority", "tool_config", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "content", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_config: 'BatchCompletionsModelConfig', - output_data_path: typing.Union[MetaOapg.properties.output_data_path, str, ], - input_data_path: typing.Union[MetaOapg.properties.input_data_path, None, str, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - data_parallelism: typing.Union[MetaOapg.properties.data_parallelism, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_runtime_sec: typing.Union[MetaOapg.properties.max_runtime_sec, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - priority: typing.Union[MetaOapg.properties.priority, None, str, schemas.Unset] = schemas.unset, - tool_config: typing.Union['ToolConfig', schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateBatchCompletionsV2Request': - return super().__new__( - cls, - *_args, - model_config=model_config, - output_data_path=output_data_path, - input_data_path=input_data_path, - labels=labels, - data_parallelism=data_parallelism, - max_runtime_sec=max_runtime_sec, - priority=priority, - tool_config=tool_config, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - content=content, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.batch_completions_model_config import ( - BatchCompletionsModelConfig, -) -from launch.api_client.model.create_batch_completions_v1_request_content import ( - CreateBatchCompletionsV1RequestContent, -) -from launch.api_client.model.filtered_chat_completion_v2_request import ( - FilteredChatCompletionV2Request, -) -from launch.api_client.model.filtered_completion_v2_request import ( - FilteredCompletionV2Request, -) -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.tool_config import ToolConfig diff --git a/launch/api_client/model/create_batch_job_resource_requests.py b/launch/api_client/model/create_batch_job_resource_requests.py deleted file mode 100644 index ec6cecad..00000000 --- 
a/launch/api_client/model/create_batch_job_resource_requests.py +++ /dev/null @@ -1,349 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchJobResourceRequests( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - class properties: - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class max_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class concurrent_requests_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'concurrent_requests_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "cpus": cpus, - 
"memory": memory, - "gpus": gpus, - "gpu_type": gpu_type, - "storage": storage, - "max_workers": max_workers, - "per_worker": per_worker, - "concurrent_requests_per_worker": concurrent_requests_per_worker, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["cpus", "memory", "gpus", "gpu_type", "storage", "max_workers", "per_worker", "concurrent_requests_per_worker", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> typing.Union[MetaOapg.properties.concurrent_requests_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cpus", "memory", "gpus", "gpu_type", "storage", "max_workers", "per_worker", "concurrent_requests_per_worker", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - concurrent_requests_per_worker: typing.Union[MetaOapg.properties.concurrent_requests_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateBatchJobResourceRequests': - return super().__new__( - cls, - *_args, - cpus=cpus, - memory=memory, - gpus=gpus, - gpu_type=gpu_type, - storage=storage, - 
max_workers=max_workers, - per_worker=per_worker, - concurrent_requests_per_worker=concurrent_requests_per_worker, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/create_batch_job_v1_request.py b/launch/api_client/model/create_batch_job_v1_request.py deleted file mode 100644 index 82af83c1..00000000 --- a/launch/api_client/model/create_batch_job_v1_request.py +++ /dev/null @@ -1,182 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchJobV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "model_bundle_id", - "resource_requests", - "serialization_format", - "input_path", - "labels", - } - - class properties: - model_bundle_id = schemas.StrSchema - input_path = schemas.StrSchema - - @staticmethod - def serialization_format() -> typing.Type['BatchJobSerializationFormat']: - return BatchJobSerializationFormat - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def resource_requests() -> typing.Type['CreateBatchJobResourceRequests']: - return CreateBatchJobResourceRequests - timeout_seconds = schemas.NumberSchema - __annotations__ = { - "model_bundle_id": model_bundle_id, - "input_path": input_path, - "serialization_format": serialization_format, - "labels": labels, - "resource_requests": resource_requests, - "timeout_seconds": timeout_seconds, - } - - model_bundle_id: MetaOapg.properties.model_bundle_id - resource_requests: 'CreateBatchJobResourceRequests' - serialization_format: 'BatchJobSerializationFormat' - input_path: MetaOapg.properties.input_path - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["input_path"]) -> MetaOapg.properties.input_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["serialization_format"]) -> 'BatchJobSerializationFormat': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["resource_requests"]) -> 'CreateBatchJobResourceRequests': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timeout_seconds"]) -> MetaOapg.properties.timeout_seconds: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", "input_path", "serialization_format", "labels", "resource_requests", "timeout_seconds", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["input_path"]) -> MetaOapg.properties.input_path: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["serialization_format"]) -> 'BatchJobSerializationFormat': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["resource_requests"]) -> 'CreateBatchJobResourceRequests': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["timeout_seconds"]) -> typing.Union[MetaOapg.properties.timeout_seconds, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", "input_path", "serialization_format", "labels", "resource_requests", "timeout_seconds", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, ], - resource_requests: 'CreateBatchJobResourceRequests', - serialization_format: 'BatchJobSerializationFormat', - input_path: typing.Union[MetaOapg.properties.input_path, str, ], - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], - timeout_seconds: typing.Union[MetaOapg.properties.timeout_seconds, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateBatchJobV1Request': - return super().__new__( - cls, - *_args, - model_bundle_id=model_bundle_id, - resource_requests=resource_requests, - serialization_format=serialization_format, - input_path=input_path, - labels=labels, - timeout_seconds=timeout_seconds, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.batch_job_serialization_format import ( - BatchJobSerializationFormat, -) -from launch.api_client.model.create_batch_job_resource_requests import ( - CreateBatchJobResourceRequests, -) diff --git a/launch/api_client/model/create_batch_job_v1_response.py b/launch/api_client/model/create_batch_job_v1_response.py deleted file mode 100644 index 59f232e2..00000000 --- a/launch/api_client/model/create_batch_job_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided 
(generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateBatchJobV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "job_id", - } - - class properties: - job_id = schemas.StrSchema - __annotations__ = { - "job_id": job_id, - } - - job_id: MetaOapg.properties.job_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - job_id: typing.Union[MetaOapg.properties.job_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateBatchJobV1Response': - return super().__new__( - cls, - *_args, - job_id=job_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_chat_completion_response.py b/launch/api_client/model/create_chat_completion_response.py deleted file mode 100644 index fdbcbf28..00000000 --- a/launch/api_client/model/create_chat_completion_response.py +++ /dev/null @@ -1,230 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateChatCompletionResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "created", - "model", - "id", - "choices", - "object", - } - - class properties: - id = schemas.StrSchema - - - class choices( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['Choice']: - return Choice - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['Choice'], typing.List['Choice']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'choices': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'Choice': - return super().__getitem__(i) - created = schemas.IntSchema - model = schemas.StrSchema - - - class object( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "chat.completion": "CHAT_COMPLETION", - } - - @schemas.classproperty - def CHAT_COMPLETION(cls): - return cls("chat.completion") - - @staticmethod - def service_tier() -> typing.Type['ServiceTier']: - return ServiceTier - - - class system_fingerprint( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'system_fingerprint': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def usage() -> typing.Type['CompletionUsage']: - return CompletionUsage - __annotations__ = { - "id": id, - "choices": choices, - "created": created, - "model": model, - "object": object, - "service_tier": service_tier, - "system_fingerprint": system_fingerprint, - "usage": usage, - } - - created: MetaOapg.properties.created - model: MetaOapg.properties.model - id: MetaOapg.properties.id - choices: MetaOapg.properties.choices - object: MetaOapg.properties.object - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["service_tier"]) -> 'ServiceTier': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["system_fingerprint"]) -> MetaOapg.properties.system_fingerprint: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["usage"]) -> 'CompletionUsage': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "service_tier", "system_fingerprint", "usage", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["service_tier"]) -> typing.Union['ServiceTier', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["system_fingerprint"]) -> typing.Union[MetaOapg.properties.system_fingerprint, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["usage"]) -> typing.Union['CompletionUsage', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "service_tier", "system_fingerprint", "usage", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - created: typing.Union[MetaOapg.properties.created, decimal.Decimal, int, ], - model: typing.Union[MetaOapg.properties.model, str, ], - id: typing.Union[MetaOapg.properties.id, str, ], - choices: typing.Union[MetaOapg.properties.choices, list, tuple, ], - object: typing.Union[MetaOapg.properties.object, str, ], - service_tier: typing.Union['ServiceTier', schemas.Unset] = schemas.unset, - system_fingerprint: typing.Union[MetaOapg.properties.system_fingerprint, None, str, schemas.Unset] = schemas.unset, - usage: typing.Union['CompletionUsage', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateChatCompletionResponse': - return super().__new__( - cls, - *_args, - created=created, - model=model, - id=id, - choices=choices, - object=object, - service_tier=service_tier, - system_fingerprint=system_fingerprint, - usage=usage, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.choice import Choice -from launch.api_client.model.completion_usage import CompletionUsage -from 
launch.api_client.model.service_tier import ServiceTier diff --git a/launch/api_client/model/create_chat_completion_stream_response.py b/launch/api_client/model/create_chat_completion_stream_response.py deleted file mode 100644 index b0b83707..00000000 --- a/launch/api_client/model/create_chat_completion_stream_response.py +++ /dev/null @@ -1,230 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateChatCompletionStreamResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "created", - "model", - "id", - "choices", - "object", - } - - class properties: - id = schemas.StrSchema - - - class choices( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['Choice1']: - return Choice1 - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['Choice1'], typing.List['Choice1']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'choices': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'Choice1': - return super().__getitem__(i) - created = schemas.IntSchema - model = schemas.StrSchema - - - class object( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "chat.completion.chunk": "CHAT_COMPLETION_CHUNK", - } - - @schemas.classproperty - def CHAT_COMPLETION_CHUNK(cls): - return cls("chat.completion.chunk") - - @staticmethod - def service_tier() -> typing.Type['ServiceTier']: - return ServiceTier - - - class system_fingerprint( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'system_fingerprint': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def usage() -> typing.Type['CompletionUsage']: - return CompletionUsage - __annotations__ = { - "id": id, - "choices": choices, - "created": created, - "model": model, - "object": object, - "service_tier": service_tier, - "system_fingerprint": system_fingerprint, - "usage": usage, - } - - created: MetaOapg.properties.created - model: MetaOapg.properties.model - id: MetaOapg.properties.id - choices: MetaOapg.properties.choices - object: MetaOapg.properties.object - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: 
... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["service_tier"]) -> 'ServiceTier': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["system_fingerprint"]) -> MetaOapg.properties.system_fingerprint: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["usage"]) -> 'CompletionUsage': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "service_tier", "system_fingerprint", "usage", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["service_tier"]) -> typing.Union['ServiceTier', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["system_fingerprint"]) -> typing.Union[MetaOapg.properties.system_fingerprint, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["usage"]) -> typing.Union['CompletionUsage', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "service_tier", "system_fingerprint", "usage", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - created: typing.Union[MetaOapg.properties.created, decimal.Decimal, int, ], - model: typing.Union[MetaOapg.properties.model, str, ], - id: typing.Union[MetaOapg.properties.id, str, ], - choices: typing.Union[MetaOapg.properties.choices, list, tuple, ], - object: typing.Union[MetaOapg.properties.object, str, ], - service_tier: typing.Union['ServiceTier', schemas.Unset] = schemas.unset, - system_fingerprint: typing.Union[MetaOapg.properties.system_fingerprint, None, str, schemas.Unset] = schemas.unset, - usage: typing.Union['CompletionUsage', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateChatCompletionStreamResponse': - return super().__new__( - cls, - *_args, - created=created, - model=model, - id=id, - choices=choices, - object=object, - service_tier=service_tier, - system_fingerprint=system_fingerprint, - usage=usage, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.choice1 import Choice1 -from launch.api_client.model.completion_usage import CompletionUsage -from 
launch.api_client.model.service_tier import ServiceTier diff --git a/launch/api_client/model/create_completion_response.py b/launch/api_client/model/create_completion_response.py deleted file mode 100644 index 963b10b1..00000000 --- a/launch/api_client/model/create_completion_response.py +++ /dev/null @@ -1,216 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateCompletionResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "created", - "model", - "id", - "choices", - "object", - } - - class properties: - id = schemas.StrSchema - - - class choices( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['Choice2']: - return Choice2 - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['Choice2'], typing.List['Choice2']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'choices': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'Choice2': - return super().__getitem__(i) - created = schemas.IntSchema - model = schemas.StrSchema - - - class object( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "text_completion": "TEXT_COMPLETION", - } - - @schemas.classproperty - def TEXT_COMPLETION(cls): - return cls("text_completion") - - - class system_fingerprint( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'system_fingerprint': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def usage() -> typing.Type['CompletionUsage']: - return CompletionUsage - __annotations__ = { - "id": id, - "choices": choices, - "created": created, - "model": model, - "object": object, - "system_fingerprint": system_fingerprint, - "usage": usage, - } - - created: MetaOapg.properties.created - model: MetaOapg.properties.model - id: MetaOapg.properties.id - choices: MetaOapg.properties.choices - object: MetaOapg.properties.object - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["system_fingerprint"]) -> MetaOapg.properties.system_fingerprint: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["usage"]) -> 'CompletionUsage': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "system_fingerprint", "usage", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["system_fingerprint"]) -> typing.Union[MetaOapg.properties.system_fingerprint, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["usage"]) -> typing.Union['CompletionUsage', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "system_fingerprint", "usage", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - created: typing.Union[MetaOapg.properties.created, decimal.Decimal, int, ], - model: typing.Union[MetaOapg.properties.model, str, ], - id: typing.Union[MetaOapg.properties.id, str, ], - choices: typing.Union[MetaOapg.properties.choices, list, tuple, ], - object: typing.Union[MetaOapg.properties.object, str, ], - system_fingerprint: typing.Union[MetaOapg.properties.system_fingerprint, None, str, schemas.Unset] = schemas.unset, - usage: typing.Union['CompletionUsage', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateCompletionResponse': - return super().__new__( - cls, - *_args, - created=created, - model=model, - id=id, - choices=choices, - object=object, - system_fingerprint=system_fingerprint, - usage=usage, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.choice2 import Choice2 -from launch.api_client.model.completion_usage import CompletionUsage diff --git a/launch/api_client/model/create_deep_speed_model_endpoint_request.py b/launch/api_client/model/create_deep_speed_model_endpoint_request.py deleted file mode 100644 index a6aa457b..00000000 --- a/launch/api_client/model/create_deep_speed_model_endpoint_request.py +++ /dev/null @@ -1,842 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The 
version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateDeepSpeedModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "metadata", - "model_name", - "max_workers", - "min_workers", - "name", - "per_worker", - "labels", - } - - class properties: - name = schemas.StrSchema - model_name = schemas.StrSchema - - - class metadata( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - min_workers = schemas.IntSchema - max_workers = schemas.IntSchema - per_worker = schemas.IntSchema - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: 
typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - inference_framework_image_tag = schemas.StrSchema - num_shards = schemas.IntSchema - - @staticmethod - def endpoint_type() -> typing.Type['ModelEndpointType']: - return ModelEndpointType - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "deepspeed": "DEEPSPEED", - } - - @schemas.classproperty - def DEEPSPEED(cls): - return cls("deepspeed") - __annotations__ = { - "name": name, - "model_name": model_name, - "metadata": metadata, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - "chat_template_override": 
chat_template_override, - "enable_startup_metrics": enable_startup_metrics, - "source": source, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "endpoint_type": endpoint_type, - "inference_framework": inference_framework, - } - - metadata: MetaOapg.properties.metadata - model_name: MetaOapg.properties.model_name - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - name: MetaOapg.properties.name - per_worker: MetaOapg.properties.per_worker - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], - model_name: typing.Union[MetaOapg.properties.model_name, str, ], - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], - name: typing.Union[MetaOapg.properties.name, str, ], - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = 
schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: 
typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, - inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateDeepSpeedModelEndpointRequest': - return super().__new__( - cls, - *_args, - metadata=metadata, - model_name=model_name, - max_workers=max_workers, - min_workers=min_workers, - name=name, - per_worker=per_worker, - labels=labels, - quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - source=source, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - endpoint_type=endpoint_type, - inference_framework=inference_framework, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType 
-from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.model_endpoint_type import ModelEndpointType -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.py b/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.py deleted file mode 100644 index 3cbb634f..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.py +++ /dev/null @@ -1,254 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateDockerImageBatchJobBundleV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "image_repository", - "name", - "image_tag", - "command", - } - - class properties: - name = schemas.StrSchema - image_repository = schemas.StrSchema - image_tag = schemas.StrSchema - - - class command( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'command': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class env( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'env': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class mount_location( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'mount_location': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def resource_requests() -> typing.Type['CreateDockerImageBatchJobResourceRequests']: - return CreateDockerImageBatchJobResourceRequests - - - class public( - schemas.BoolBase, - schemas.NoneBase, - 
schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "name": name, - "image_repository": image_repository, - "image_tag": image_tag, - "command": command, - "env": env, - "mount_location": mount_location, - "resource_requests": resource_requests, - "public": public, - } - - image_repository: MetaOapg.properties.image_repository - name: MetaOapg.properties.name - image_tag: MetaOapg.properties.image_tag - command: MetaOapg.properties.command - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mount_location"]) -> MetaOapg.properties.mount_location: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["resource_requests"]) -> 'CreateDockerImageBatchJobResourceRequests': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public"]) -> MetaOapg.properties.public: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "image_repository", "image_tag", "command", "env", "mount_location", "resource_requests", "public", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["mount_location"]) -> typing.Union[MetaOapg.properties.mount_location, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["resource_requests"]) -> typing.Union['CreateDockerImageBatchJobResourceRequests', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public"]) -> typing.Union[MetaOapg.properties.public, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "image_repository", "image_tag", "command", "env", "mount_location", "resource_requests", "public", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - image_repository: typing.Union[MetaOapg.properties.image_repository, str, ], - name: typing.Union[MetaOapg.properties.name, str, ], - image_tag: typing.Union[MetaOapg.properties.image_tag, str, ], - command: typing.Union[MetaOapg.properties.command, list, tuple, ], - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - mount_location: typing.Union[MetaOapg.properties.mount_location, None, str, schemas.Unset] = schemas.unset, - resource_requests: typing.Union['CreateDockerImageBatchJobResourceRequests', schemas.Unset] = schemas.unset, - public: typing.Union[MetaOapg.properties.public, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateDockerImageBatchJobBundleV1Request': - return super().__new__( - cls, - *_args, - image_repository=image_repository, - name=name, - image_tag=image_tag, - command=command, - env=env, - mount_location=mount_location, - resource_requests=resource_requests, - public=public, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.create_docker_image_batch_job_resource_requests import ( - CreateDockerImageBatchJobResourceRequests, -) diff --git a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.py deleted file mode 100644 index 03394c6a..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.py 
+++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateDockerImageBatchJobBundleV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "docker_image_batch_job_bundle_id", - } - - class properties: - docker_image_batch_job_bundle_id = schemas.StrSchema - __annotations__ = { - "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, - } - - docker_image_batch_job_bundle_id: MetaOapg.properties.docker_image_batch_job_bundle_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["docker_image_batch_job_bundle_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["docker_image_batch_job_bundle_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - docker_image_batch_job_bundle_id: typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateDockerImageBatchJobBundleV1Response': - return super().__new__( - cls, - *_args, - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_docker_image_batch_job_resource_requests.py b/launch/api_client/model/create_docker_image_batch_job_resource_requests.py deleted file mode 100644 index a31aef74..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_resource_requests.py +++ /dev/null @@ -1,291 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateDockerImageBatchJobResourceRequests( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "cpus": cpus, - "memory": memory, - "gpus": gpus, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["cpus", "memory", "gpus", "gpu_type", "storage", "nodes_per_worker", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cpus", "memory", "gpus", "gpu_type", "storage", "nodes_per_worker", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateDockerImageBatchJobResourceRequests': - return super().__new__( - cls, - *_args, - cpus=cpus, - memory=memory, - gpus=gpus, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/create_docker_image_batch_job_v1_request.py b/launch/api_client/model/create_docker_image_batch_job_v1_request.py deleted file mode 
100644 index 58029c8e..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_v1_request.py +++ /dev/null @@ -1,257 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateDockerImageBatchJobV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "labels", - } - - class properties: - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class docker_image_batch_job_bundle_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - 
) -> 'docker_image_batch_job_bundle_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class docker_image_batch_job_bundle_id( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'docker_image_batch_job_bundle_id': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class job_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'job_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def resource_requests() -> typing.Type['CreateDockerImageBatchJobResourceRequests']: - return CreateDockerImageBatchJobResourceRequests - - - class override_job_max_runtime_s( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'override_job_max_runtime_s': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) 
- __annotations__ = { - "labels": labels, - "docker_image_batch_job_bundle_name": docker_image_batch_job_bundle_name, - "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, - "job_config": job_config, - "resource_requests": resource_requests, - "override_job_max_runtime_s": override_job_max_runtime_s, - } - - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_name"]) -> MetaOapg.properties.docker_image_batch_job_bundle_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_config"]) -> MetaOapg.properties.job_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["resource_requests"]) -> 'CreateDockerImageBatchJobResourceRequests': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["override_job_max_runtime_s"]) -> MetaOapg.properties.override_job_max_runtime_s: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["labels", "docker_image_batch_job_bundle_name", "docker_image_batch_job_bundle_id", "job_config", "resource_requests", "override_job_max_runtime_s", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_name"]) -> typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_config"]) -> typing.Union[MetaOapg.properties.job_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["resource_requests"]) -> typing.Union['CreateDockerImageBatchJobResourceRequests', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["override_job_max_runtime_s"]) -> typing.Union[MetaOapg.properties.override_job_max_runtime_s, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["labels", "docker_image_batch_job_bundle_name", "docker_image_batch_job_bundle_id", "job_config", "resource_requests", "override_job_max_runtime_s", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], - docker_image_batch_job_bundle_name: typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_name, None, str, schemas.Unset] = schemas.unset, - docker_image_batch_job_bundle_id: typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, None, str, schemas.Unset] = schemas.unset, - job_config: typing.Union[MetaOapg.properties.job_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - resource_requests: typing.Union['CreateDockerImageBatchJobResourceRequests', schemas.Unset] = schemas.unset, - override_job_max_runtime_s: typing.Union[MetaOapg.properties.override_job_max_runtime_s, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateDockerImageBatchJobV1Request': - return super().__new__( - cls, - *_args, - labels=labels, - docker_image_batch_job_bundle_name=docker_image_batch_job_bundle_name, - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - job_config=job_config, - resource_requests=resource_requests, - override_job_max_runtime_s=override_job_max_runtime_s, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.create_docker_image_batch_job_resource_requests import ( - CreateDockerImageBatchJobResourceRequests, -) diff --git a/launch/api_client/model/create_docker_image_batch_job_v1_response.py 
b/launch/api_client/model/create_docker_image_batch_job_v1_response.py deleted file mode 100644 index f90e3731..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateDockerImageBatchJobV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "job_id", - } - - class properties: - job_id = schemas.StrSchema - __annotations__ = { - "job_id": job_id, - } - - job_id: MetaOapg.properties.job_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - job_id: typing.Union[MetaOapg.properties.job_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateDockerImageBatchJobV1Response': - return super().__new__( - cls, - *_args, - job_id=job_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_fine_tune_job_request.py b/launch/api_client/model/create_fine_tune_job_request.py deleted file mode 100644 index ec449348..00000000 --- a/launch/api_client/model/create_fine_tune_job_request.py +++ /dev/null @@ -1,229 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateFineTuneRequest(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "training_file", - "hyperparameters", - "model", - } - - class properties: - class hyperparameters(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[ - str, - ], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[ - str, - ], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "hyperparameters": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - model = schemas.StrSchema - training_file = schemas.StrSchema - suffix = schemas.StrSchema - validation_file = schemas.StrSchema - __annotations__ = { - "hyperparameters": hyperparameters, - "model": model, - "training_file": training_file, - "suffix": suffix, - "validation_file": validation_file, - } - - training_file: MetaOapg.properties.training_file - hyperparameters: MetaOapg.properties.hyperparameters - model: MetaOapg.properties.model - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["hyperparameters"]) -> MetaOapg.properties.hyperparameters: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["validation_file"]) -> MetaOapg.properties.validation_file: - ... 
- - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "hyperparameters", - "model", - "training_file", - "suffix", - "validation_file", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["hyperparameters"]) -> MetaOapg.properties.hyperparameters: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["suffix"] - ) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["validation_file"] - ) -> typing.Union[MetaOapg.properties.validation_file, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "hyperparameters", - "model", - "training_file", - "suffix", - "validation_file", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - training_file: typing.Union[ - MetaOapg.properties.training_file, - str, - ], - hyperparameters: typing.Union[ - MetaOapg.properties.hyperparameters, - dict, - frozendict.frozendict, - ], - model: typing.Union[ - MetaOapg.properties.model, - str, - ], - suffix: typing.Union[MetaOapg.properties.suffix, str, schemas.Unset] = schemas.unset, - validation_file: typing.Union[MetaOapg.properties.validation_file, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateFineTuneRequest": - return super().__new__( - cls, - *_args, - training_file=training_file, - hyperparameters=hyperparameters, - model=model, - suffix=suffix, - validation_file=validation_file, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_fine_tune_job_response.py b/launch/api_client/model/create_fine_tune_job_response.py deleted file mode 100644 index 3a1d3224..00000000 --- a/launch/api_client/model/create_fine_tune_job_response.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import 
date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateFineTuneResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "fine_tune_id", - } - - class properties: - fine_tune_id = schemas.StrSchema - __annotations__ = { - "fine_tune_id": fine_tune_id, - } - - fine_tune_id: MetaOapg.properties.fine_tune_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["fine_tune_id"]) -> MetaOapg.properties.fine_tune_id: - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "fine_tune_id", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["fine_tune_id"]) -> MetaOapg.properties.fine_tune_id: - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "fine_tune_id", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - fine_tune_id: typing.Union[ - MetaOapg.properties.fine_tune_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateFineTuneResponse": - return super().__new__( - cls, - *_args, - fine_tune_id=fine_tune_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_fine_tune_request.py b/launch/api_client/model/create_fine_tune_request.py deleted file mode 100644 index 0672053c..00000000 --- a/launch/api_client/model/create_fine_tune_request.py +++ /dev/null @@ -1,305 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateFineTuneRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "training_file", - "hyperparameters", - "model", - } - - class properties: - model = schemas.StrSchema - training_file = schemas.StrSchema - - - class hyperparameters( - schemas.DictSchema - ): - - - class MetaOapg: - - - class additional_properties( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - - class any_of_3( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'any_of_3': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - cls.any_of_3, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'additional_properties': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'hyperparameters': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class validation_file( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'validation_file': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class suffix( - 
schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'suffix': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class wandb_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'wandb_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "model": model, - "training_file": training_file, - "hyperparameters": hyperparameters, - "validation_file": validation_file, - "suffix": suffix, - "wandb_config": wandb_config, - } - - training_file: MetaOapg.properties.training_file - hyperparameters: MetaOapg.properties.hyperparameters - model: MetaOapg.properties.model - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["hyperparameters"]) -> MetaOapg.properties.hyperparameters: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["validation_file"]) -> MetaOapg.properties.validation_file: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["wandb_config"]) -> MetaOapg.properties.wandb_config: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model", "training_file", "hyperparameters", "validation_file", "suffix", "wandb_config", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["hyperparameters"]) -> MetaOapg.properties.hyperparameters: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["validation_file"]) -> typing.Union[MetaOapg.properties.validation_file, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["suffix"]) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["wandb_config"]) -> typing.Union[MetaOapg.properties.wandb_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model", "training_file", "hyperparameters", "validation_file", "suffix", "wandb_config", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - training_file: typing.Union[MetaOapg.properties.training_file, str, ], - hyperparameters: typing.Union[MetaOapg.properties.hyperparameters, dict, frozendict.frozendict, ], - model: typing.Union[MetaOapg.properties.model, str, ], - validation_file: typing.Union[MetaOapg.properties.validation_file, None, str, schemas.Unset] = schemas.unset, - suffix: typing.Union[MetaOapg.properties.suffix, None, str, schemas.Unset] = schemas.unset, - wandb_config: typing.Union[MetaOapg.properties.wandb_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateFineTuneRequest': - return super().__new__( - cls, - *_args, - training_file=training_file, - hyperparameters=hyperparameters, - model=model, - validation_file=validation_file, - suffix=suffix, - wandb_config=wandb_config, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_fine_tune_response.py b/launch/api_client/model/create_fine_tune_response.py deleted file mode 100644 index 449f305e..00000000 --- a/launch/api_client/model/create_fine_tune_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: 
F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateFineTuneResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "id", - } - - class properties: - id = schemas.StrSchema - __annotations__ = { - "id": id, - } - - id: MetaOapg.properties.id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - id: typing.Union[MetaOapg.properties.id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateFineTuneResponse': - return super().__new__( - cls, - *_args, - id=id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_light_llm_model_endpoint_request.py b/launch/api_client/model/create_light_llm_model_endpoint_request.py deleted file mode 100644 index e8b8fa2c..00000000 --- a/launch/api_client/model/create_light_llm_model_endpoint_request.py +++ /dev/null @@ -1,842 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateLightLLMModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "metadata", - "model_name", - "max_workers", - "min_workers", - "name", - "per_worker", - "labels", - } - - class properties: - name = schemas.StrSchema - model_name = schemas.StrSchema - - - class metadata( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - min_workers = schemas.IntSchema - max_workers = schemas.IntSchema - per_worker = schemas.IntSchema - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return 
Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - inference_framework_image_tag = schemas.StrSchema - num_shards = schemas.IntSchema - - @staticmethod - def endpoint_type() -> typing.Type['ModelEndpointType']: - return ModelEndpointType - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "lightllm": "LIGHTLLM", - } - - @schemas.classproperty - def LIGHTLLM(cls): - return cls("lightllm") - __annotations__ = { - "name": name, - "model_name": model_name, - "metadata": metadata, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - "chat_template_override": 
chat_template_override, - "enable_startup_metrics": enable_startup_metrics, - "source": source, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "endpoint_type": endpoint_type, - "inference_framework": inference_framework, - } - - metadata: MetaOapg.properties.metadata - model_name: MetaOapg.properties.model_name - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - name: MetaOapg.properties.name - per_worker: MetaOapg.properties.per_worker - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], - model_name: typing.Union[MetaOapg.properties.model_name, str, ], - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], - name: typing.Union[MetaOapg.properties.name, str, ], - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = 
schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: 
typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, - inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateLightLLMModelEndpointRequest': - return super().__new__( - cls, - *_args, - metadata=metadata, - model_name=model_name, - max_workers=max_workers, - min_workers=min_workers, - name=name, - per_worker=per_worker, - labels=labels, - quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - source=source, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - endpoint_type=endpoint_type, - inference_framework=inference_framework, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from 
launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.model_endpoint_type import ModelEndpointType -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_llm_model_endpoint_v1_request.py b/launch/api_client/model/create_llm_model_endpoint_v1_request.py deleted file mode 100644 index 28fdbae5..00000000 --- a/launch/api_client/model/create_llm_model_endpoint_v1_request.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateLLMModelEndpointV1Request( - schemas.ComposedSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CreateVLLMModelEndpointRequest, - CreateSGLangModelEndpointRequest, - CreateDeepSpeedModelEndpointRequest, - CreateTextGenerationInferenceModelEndpointRequest, - CreateLightLLMModelEndpointRequest, - CreateTensorRTLLMModelEndpointRequest, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateLLMModelEndpointV1Request': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.create_deep_speed_model_endpoint_request import ( - CreateDeepSpeedModelEndpointRequest, -) -from launch.api_client.model.create_light_llm_model_endpoint_request import ( - CreateLightLLMModelEndpointRequest, -) -from launch.api_client.model.create_sg_lang_model_endpoint_request import ( - CreateSGLangModelEndpointRequest, -) -from launch.api_client.model.create_tensor_rtllm_model_endpoint_request import ( - CreateTensorRTLLMModelEndpointRequest, -) -from launch.api_client.model.create_text_generation_inference_model_endpoint_request import ( - CreateTextGenerationInferenceModelEndpointRequest, -) -from launch.api_client.model.create_vllm_model_endpoint_request import ( - CreateVLLMModelEndpointRequest, -) diff --git a/launch/api_client/model/create_llm_model_endpoint_v1_response.py b/launch/api_client/model/create_llm_model_endpoint_v1_response.py deleted file mode 100644 index 
f7742cc3..00000000 --- a/launch/api_client/model/create_llm_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateLLMModelEndpointV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "endpoint_creation_task_id", - } - - class properties: - endpoint_creation_task_id = schemas.StrSchema - __annotations__ = { - "endpoint_creation_task_id": endpoint_creation_task_id, - } - - endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - endpoint_creation_task_id: typing.Union[MetaOapg.properties.endpoint_creation_task_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateLLMModelEndpointV1Response': - return super().__new__( - cls, - *_args, - endpoint_creation_task_id=endpoint_creation_task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_model_bundle_v1_request.py b/launch/api_client/model/create_model_bundle_v1_request.py deleted file mode 100644 index 83ee0b75..00000000 --- a/launch/api_client/model/create_model_bundle_v1_request.py +++ /dev/null @@ -1,281 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateModelBundleV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for creating a Model Bundle. 
- """ - - - class MetaOapg: - required = { - "requirements", - "packaging_type", - "name", - "location", - "env_params", - } - - class properties: - name = schemas.StrSchema - location = schemas.StrSchema - - - class requirements( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'requirements': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - @staticmethod - def env_params() -> typing.Type['ModelBundleEnvironmentParams']: - return ModelBundleEnvironmentParams - - @staticmethod - def packaging_type() -> typing.Type['ModelBundlePackagingType']: - return ModelBundlePackagingType - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class app_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'app_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class schema_location( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'schema_location': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "name": name, - "location": location, - "requirements": requirements, - "env_params": env_params, - "packaging_type": packaging_type, - "metadata": metadata, - "app_config": app_config, - "schema_location": schema_location, - } - - requirements: MetaOapg.properties.requirements - packaging_type: 'ModelBundlePackagingType' - name: MetaOapg.properties.name - location: MetaOapg.properties.location - env_params: 'ModelBundleEnvironmentParams' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env_params"]) -> 'ModelBundleEnvironmentParams': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["packaging_type"]) -> 'ModelBundlePackagingType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "location", "requirements", "env_params", "packaging_type", "metadata", "app_config", "schema_location", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env_params"]) -> 'ModelBundleEnvironmentParams': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["packaging_type"]) -> 'ModelBundlePackagingType': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["app_config"]) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schema_location"]) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "location", "requirements", "env_params", "packaging_type", "metadata", "app_config", "schema_location", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - requirements: typing.Union[MetaOapg.properties.requirements, list, tuple, ], - packaging_type: 'ModelBundlePackagingType', - name: typing.Union[MetaOapg.properties.name, str, ], - location: typing.Union[MetaOapg.properties.location, str, ], - env_params: 'ModelBundleEnvironmentParams', - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - app_config: typing.Union[MetaOapg.properties.app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - schema_location: typing.Union[MetaOapg.properties.schema_location, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateModelBundleV1Request': - return super().__new__( - cls, - *_args, - requirements=requirements, - packaging_type=packaging_type, - name=name, - location=location, - env_params=env_params, - metadata=metadata, - app_config=app_config, - schema_location=schema_location, - _configuration=_configuration, - **kwargs, - ) - -from 
launch.api_client.model.model_bundle_environment_params import ( - ModelBundleEnvironmentParams, -) -from launch.api_client.model.model_bundle_packaging_type import ( - ModelBundlePackagingType, -) diff --git a/launch/api_client/model/create_model_bundle_v1_response.py b/launch/api_client/model/create_model_bundle_v1_response.py deleted file mode 100644 index 1a461552..00000000 --- a/launch/api_client/model/create_model_bundle_v1_response.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateModelBundleV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for creating a Model Bundle. - """ - - - class MetaOapg: - required = { - "model_bundle_id", - } - - class properties: - model_bundle_id = schemas.StrSchema - __annotations__ = { - "model_bundle_id": model_bundle_id, - } - - model_bundle_id: MetaOapg.properties.model_bundle_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateModelBundleV1Response': - return super().__new__( - cls, - *_args, - model_bundle_id=model_bundle_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_model_bundle_v2_request.py b/launch/api_client/model/create_model_bundle_v2_request.py deleted file mode 100644 index 1f21fd72..00000000 --- a/launch/api_client/model/create_model_bundle_v2_request.py +++ /dev/null @@ -1,202 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: 
F401 - - -class CreateModelBundleV2Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for creating a Model Bundle. - """ - - - class MetaOapg: - required = { - "flavor", - "name", - "schema_location", - } - - class properties: - name = schemas.StrSchema - schema_location = schemas.StrSchema - - - class flavor( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CloudpickleArtifactFlavor, - ZipArtifactFlavor, - RunnableImageFlavor, - StreamingEnhancedRunnableImageFlavor, - TritonEnhancedRunnableImageFlavor, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'flavor': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return 
super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "name": name, - "schema_location": schema_location, - "flavor": flavor, - "metadata": metadata, - } - - flavor: MetaOapg.properties.flavor - name: MetaOapg.properties.name - schema_location: MetaOapg.properties.schema_location - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "schema_location", "flavor", "metadata", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "schema_location", "flavor", "metadata", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - flavor: typing.Union[MetaOapg.properties.flavor, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - name: typing.Union[MetaOapg.properties.name, str, ], - schema_location: typing.Union[MetaOapg.properties.schema_location, str, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateModelBundleV2Request': - return super().__new__( - cls, - *_args, - flavor=flavor, - name=name, - schema_location=schema_location, - metadata=metadata, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.cloudpickle_artifact_flavor import ( - CloudpickleArtifactFlavor, -) -from launch.api_client.model.runnable_image_flavor import RunnableImageFlavor -from launch.api_client.model.streaming_enhanced_runnable_image_flavor import ( - StreamingEnhancedRunnableImageFlavor, -) -from launch.api_client.model.triton_enhanced_runnable_image_flavor import ( - TritonEnhancedRunnableImageFlavor, -) -from 
launch.api_client.model.zip_artifact_flavor import ZipArtifactFlavor diff --git a/launch/api_client/model/create_model_bundle_v2_response.py b/launch/api_client/model/create_model_bundle_v2_response.py deleted file mode 100644 index b79c1e80..00000000 --- a/launch/api_client/model/create_model_bundle_v2_response.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateModelBundleV2Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for creating a Model Bundle. - """ - - - class MetaOapg: - required = { - "model_bundle_id", - } - - class properties: - model_bundle_id = schemas.StrSchema - __annotations__ = { - "model_bundle_id": model_bundle_id, - } - - model_bundle_id: MetaOapg.properties.model_bundle_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateModelBundleV2Response': - return super().__new__( - cls, - *_args, - model_bundle_id=model_bundle_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_model_endpoint_v1_request.py b/launch/api_client/model/create_model_endpoint_v1_request.py deleted file mode 100644 index 06c5a6ab..00000000 --- a/launch/api_client/model/create_model_endpoint_v1_request.py +++ /dev/null @@ -1,715 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateModelEndpointV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "endpoint_type", - "metadata", - "memory", - "cpus", - "max_workers", - "model_bundle_id", - "min_workers", - "gpus", - "name", - "per_worker", - "storage", - "labels", - } - - class properties: - - - class name( - schemas.StrSchema - ): - - - class MetaOapg: - max_length = 63 - model_bundle_id = schemas.StrSchema - - @staticmethod - def endpoint_type() -> typing.Type['ModelEndpointType']: - return ModelEndpointType - - - class metadata( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntSchema - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class min_workers( - schemas.IntSchema - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - class max_workers( - schemas.IntSchema - ): - - - class MetaOapg: - inclusive_minimum = 0 - per_worker = schemas.IntSchema - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: 
typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - nodes_per_worker = schemas.IntSchema - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class concurrent_requests_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'concurrent_requests_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, 
]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "name": name, - "model_bundle_id": model_bundle_id, - "endpoint_type": endpoint_type, - "metadata": metadata, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "storage": storage, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - "post_inference_hooks": post_inference_hooks, - "gpu_type": gpu_type, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - 
"concurrent_requests_per_worker": concurrent_requests_per_worker, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - } - - endpoint_type: 'ModelEndpointType' - metadata: MetaOapg.properties.metadata - memory: MetaOapg.properties.memory - cpus: MetaOapg.properties.cpus - max_workers: MetaOapg.properties.max_workers - model_bundle_id: MetaOapg.properties.model_bundle_id - min_workers: MetaOapg.properties.min_workers - gpus: MetaOapg.properties.gpus - name: MetaOapg.properties.name - per_worker: MetaOapg.properties.per_worker - storage: MetaOapg.properties.storage - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... 
- - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_bundle_id", "endpoint_type", "metadata", "cpus", "gpus", "memory", "storage", "min_workers", "max_workers", "per_worker", "labels", "post_inference_hooks", "gpu_type", "nodes_per_worker", "optimize_costs", "concurrent_requests_per_worker", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> typing.Union[MetaOapg.properties.concurrent_requests_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_bundle_id", "endpoint_type", "metadata", "cpus", "gpus", "memory", "storage", "min_workers", "max_workers", "per_worker", "labels", "post_inference_hooks", "gpu_type", "nodes_per_worker", "optimize_costs", "concurrent_requests_per_worker", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - endpoint_type: 'ModelEndpointType', - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], - model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, ], - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, ], - name: typing.Union[MetaOapg.properties.name, str, ], - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - concurrent_requests_per_worker: typing.Union[MetaOapg.properties.concurrent_requests_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateModelEndpointV1Request': - return super().__new__( - cls, - *_args, - endpoint_type=endpoint_type, - metadata=metadata, - memory=memory, - cpus=cpus, - max_workers=max_workers, - model_bundle_id=model_bundle_id, - min_workers=min_workers, - gpus=gpus, - name=name, - per_worker=per_worker, - storage=storage, - labels=labels, - 
post_inference_hooks=post_inference_hooks, - gpu_type=gpu_type, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - concurrent_requests_per_worker=concurrent_requests_per_worker, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.model_endpoint_type import ModelEndpointType diff --git a/launch/api_client/model/create_model_endpoint_v1_response.py b/launch/api_client/model/create_model_endpoint_v1_response.py deleted file mode 100644 index e8c29dee..00000000 --- a/launch/api_client/model/create_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateModelEndpointV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "endpoint_creation_task_id", - } - - class properties: - endpoint_creation_task_id = schemas.StrSchema - __annotations__ = { - "endpoint_creation_task_id": endpoint_creation_task_id, - } - - endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - endpoint_creation_task_id: typing.Union[MetaOapg.properties.endpoint_creation_task_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateModelEndpointV1Response': - return super().__new__( - cls, - *_args, - endpoint_creation_task_id=endpoint_creation_task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_sg_lang_model_endpoint_request.py b/launch/api_client/model/create_sg_lang_model_endpoint_request.py deleted file mode 100644 index ebb54f24..00000000 --- a/launch/api_client/model/create_sg_lang_model_endpoint_request.py +++ /dev/null @@ -1,3402 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateSGLangModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "metadata", - "model_name", - "max_workers", - "min_workers", - "name", - "per_worker", - "labels", - } - - class properties: - name = schemas.StrSchema - model_name = schemas.StrSchema - - - class metadata( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - min_workers = schemas.IntSchema - max_workers = schemas.IntSchema - per_worker = schemas.IntSchema - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return 
Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - inference_framework_image_tag = schemas.StrSchema - num_shards = schemas.IntSchema - - @staticmethod - def endpoint_type() -> typing.Type['ModelEndpointType']: - return ModelEndpointType - - - class trust_remote_code( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'trust_remote_code': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tp_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tp_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class skip_tokenizer_init( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 
'skip_tokenizer_init': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class load_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'load_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class kv_cache_dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'kv_cache_dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization_param_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization_param_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class context_length( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, 
decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'context_length': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class device( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'device': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class served_model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'served_model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class is_embedding( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'is_embedding': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class mem_fraction_static( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'mem_fraction_static': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_running_requests( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_running_requests': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_total_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_total_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chunked_prefill_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chunked_prefill_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_prefill_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_prefill_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class schedule_policy( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'schedule_policy': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class schedule_conservativeness( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'schedule_conservativeness': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpu_offload_gb( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cpu_offload_gb': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prefill_only_one_req( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prefill_only_one_req': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class stream_interval( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stream_interval': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class random_seed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'random_seed': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class constrained_json_whitespace_pattern( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'constrained_json_whitespace_pattern': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class watchdog_timeout( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'watchdog_timeout': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class download_dir( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'download_dir': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class base_gpu_id( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'base_gpu_id': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class log_level( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'log_level': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class log_level_http( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: 
typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'log_level_http': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class log_requests( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'log_requests': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class show_time_cost( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'show_time_cost': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class decode_log_interval( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'decode_log_interval': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class api_key( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'api_key': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class file_storage_pth( - schemas.StrBase, - 
schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'file_storage_pth': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_cache_report( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_cache_report': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class data_parallel_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'data_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class load_balance_method( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'load_balance_method': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class expert_parallel_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'expert_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class dist_init_addr( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'dist_init_addr': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class nnodes( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nnodes': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class node_rank( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'node_rank': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class json_model_override_args( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'json_model_override_args': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class lora_paths( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'lora_paths': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_loras_per_batch( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_loras_per_batch': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - 
) - - - class attention_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'attention_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class sampling_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'sampling_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class grammar_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'grammar_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_algorithm( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_algorithm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_draft_model_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_draft_model_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_num_steps( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - 
_configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_num_steps': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_num_draft_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_num_draft_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_eagle_topk( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_eagle_topk': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_double_sparsity( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_double_sparsity': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_channel_config_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_channel_config_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_heavy_channel_num( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_heavy_channel_num': - return 
super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_heavy_token_num( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_heavy_token_num': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_heavy_channel_type( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_heavy_channel_type': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_sparse_decode_threshold( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_sparse_decode_threshold': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_radix_cache( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_radix_cache': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_jump_forward( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_jump_forward': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_cuda_graph( - schemas.BoolBase, - schemas.NoneBase, - 
schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_cuda_graph': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_cuda_graph_padding( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_cuda_graph_padding': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_outlines_disk_cache( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_outlines_disk_cache': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_custom_all_reduce( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_custom_all_reduce': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_mla( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_mla': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_overlap_schedule( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, 
- ) -> 'disable_overlap_schedule': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_mixed_chunk( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_mixed_chunk': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_dp_attention( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_dp_attention': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_ep_moe( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_ep_moe': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_torch_compile( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_torch_compile': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class torch_compile_max_bs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'torch_compile_max_bs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cuda_graph_max_bs( - schemas.IntBase, - schemas.NoneBase, - 
schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cuda_graph_max_bs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cuda_graph_bs( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.IntSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cuda_graph_bs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class torchao_config( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'torchao_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_nan_detection( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_nan_detection': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_p2p_check( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_p2p_check': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class triton_attention_reduce_in_fp32( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'triton_attention_reduce_in_fp32': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class triton_attention_num_kv_splits( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'triton_attention_num_kv_splits': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_continuous_decode_steps( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_continuous_decode_steps': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class delete_ckpt_after_loading( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'delete_ckpt_after_loading': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_memory_saver( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_memory_saver': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class allow_auto_truncate( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'allow_auto_truncate': - return super().__new__( - 
cls, - *_args, - _configuration=_configuration, - ) - - - class enable_custom_logit_processor( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_custom_logit_processor': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tool_call_parser( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tool_call_parser': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class huggingface_repo( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'huggingface_repo': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "sglang": "SGLANG", - } - - @schemas.classproperty - def SGLANG(cls): - return cls("sglang") - __annotations__ = { - "name": name, - "model_name": model_name, - "metadata": metadata, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": 
default_callback_auth, - "public_inference": public_inference, - "chat_template_override": chat_template_override, - "enable_startup_metrics": enable_startup_metrics, - "source": source, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "endpoint_type": endpoint_type, - "trust_remote_code": trust_remote_code, - "tp_size": tp_size, - "skip_tokenizer_init": skip_tokenizer_init, - "load_format": load_format, - "dtype": dtype, - "kv_cache_dtype": kv_cache_dtype, - "quantization_param_path": quantization_param_path, - "quantization": quantization, - "context_length": context_length, - "device": device, - "served_model_name": served_model_name, - "chat_template": chat_template, - "is_embedding": is_embedding, - "revision": revision, - "mem_fraction_static": mem_fraction_static, - "max_running_requests": max_running_requests, - "max_total_tokens": max_total_tokens, - "chunked_prefill_size": chunked_prefill_size, - "max_prefill_tokens": max_prefill_tokens, - "schedule_policy": schedule_policy, - "schedule_conservativeness": schedule_conservativeness, - "cpu_offload_gb": cpu_offload_gb, - "prefill_only_one_req": prefill_only_one_req, - "stream_interval": stream_interval, - "random_seed": random_seed, - "constrained_json_whitespace_pattern": constrained_json_whitespace_pattern, - "watchdog_timeout": watchdog_timeout, - "download_dir": download_dir, - "base_gpu_id": base_gpu_id, - "log_level": log_level, - "log_level_http": log_level_http, - "log_requests": log_requests, - "show_time_cost": show_time_cost, - "enable_metrics": enable_metrics, - "decode_log_interval": decode_log_interval, - "api_key": api_key, - "file_storage_pth": file_storage_pth, - "enable_cache_report": enable_cache_report, - "data_parallel_size": data_parallel_size, - "load_balance_method": load_balance_method, - "expert_parallel_size": expert_parallel_size, - "dist_init_addr": dist_init_addr, - "nnodes": nnodes, - "node_rank": node_rank, - 
"json_model_override_args": json_model_override_args, - "lora_paths": lora_paths, - "max_loras_per_batch": max_loras_per_batch, - "attention_backend": attention_backend, - "sampling_backend": sampling_backend, - "grammar_backend": grammar_backend, - "speculative_algorithm": speculative_algorithm, - "speculative_draft_model_path": speculative_draft_model_path, - "speculative_num_steps": speculative_num_steps, - "speculative_num_draft_tokens": speculative_num_draft_tokens, - "speculative_eagle_topk": speculative_eagle_topk, - "enable_double_sparsity": enable_double_sparsity, - "ds_channel_config_path": ds_channel_config_path, - "ds_heavy_channel_num": ds_heavy_channel_num, - "ds_heavy_token_num": ds_heavy_token_num, - "ds_heavy_channel_type": ds_heavy_channel_type, - "ds_sparse_decode_threshold": ds_sparse_decode_threshold, - "disable_radix_cache": disable_radix_cache, - "disable_jump_forward": disable_jump_forward, - "disable_cuda_graph": disable_cuda_graph, - "disable_cuda_graph_padding": disable_cuda_graph_padding, - "disable_outlines_disk_cache": disable_outlines_disk_cache, - "disable_custom_all_reduce": disable_custom_all_reduce, - "disable_mla": disable_mla, - "disable_overlap_schedule": disable_overlap_schedule, - "enable_mixed_chunk": enable_mixed_chunk, - "enable_dp_attention": enable_dp_attention, - "enable_ep_moe": enable_ep_moe, - "enable_torch_compile": enable_torch_compile, - "torch_compile_max_bs": torch_compile_max_bs, - "cuda_graph_max_bs": cuda_graph_max_bs, - "cuda_graph_bs": cuda_graph_bs, - "torchao_config": torchao_config, - "enable_nan_detection": enable_nan_detection, - "enable_p2p_check": enable_p2p_check, - "triton_attention_reduce_in_fp32": triton_attention_reduce_in_fp32, - "triton_attention_num_kv_splits": triton_attention_num_kv_splits, - "num_continuous_decode_steps": num_continuous_decode_steps, - "delete_ckpt_after_loading": delete_ckpt_after_loading, - "enable_memory_saver": enable_memory_saver, - "allow_auto_truncate": 
allow_auto_truncate, - "enable_custom_logit_processor": enable_custom_logit_processor, - "tool_call_parser": tool_call_parser, - "huggingface_repo": huggingface_repo, - "inference_framework": inference_framework, - } - - metadata: MetaOapg.properties.metadata - model_name: MetaOapg.properties.model_name - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - name: MetaOapg.properties.name - per_worker: MetaOapg.properties.per_worker - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tp_size"]) -> MetaOapg.properties.tp_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["kv_cache_dtype"]) -> MetaOapg.properties.kv_cache_dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["context_length"]) -> MetaOapg.properties.context_length: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["device"]) -> MetaOapg.properties.device: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["is_embedding"]) -> MetaOapg.properties.is_embedding: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mem_fraction_static"]) -> MetaOapg.properties.mem_fraction_static: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_running_requests"]) -> MetaOapg.properties.max_running_requests: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_total_tokens"]) -> MetaOapg.properties.max_total_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chunked_prefill_size"]) -> MetaOapg.properties.chunked_prefill_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_prefill_tokens"]) -> MetaOapg.properties.max_prefill_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schedule_policy"]) -> MetaOapg.properties.schedule_policy: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schedule_conservativeness"]) -> MetaOapg.properties.schedule_conservativeness: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpu_offload_gb"]) -> MetaOapg.properties.cpu_offload_gb: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prefill_only_one_req"]) -> MetaOapg.properties.prefill_only_one_req: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream_interval"]) -> MetaOapg.properties.stream_interval: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["random_seed"]) -> MetaOapg.properties.random_seed: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["constrained_json_whitespace_pattern"]) -> MetaOapg.properties.constrained_json_whitespace_pattern: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["watchdog_timeout"]) -> MetaOapg.properties.watchdog_timeout: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["download_dir"]) -> MetaOapg.properties.download_dir: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["base_gpu_id"]) -> MetaOapg.properties.base_gpu_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["log_level"]) -> MetaOapg.properties.log_level: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["log_level_http"]) -> MetaOapg.properties.log_level_http: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["log_requests"]) -> MetaOapg.properties.log_requests: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["show_time_cost"]) -> MetaOapg.properties.show_time_cost: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_metrics"]) -> MetaOapg.properties.enable_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["decode_log_interval"]) -> MetaOapg.properties.decode_log_interval: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["api_key"]) -> MetaOapg.properties.api_key: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["file_storage_pth"]) -> MetaOapg.properties.file_storage_pth: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_cache_report"]) -> MetaOapg.properties.enable_cache_report: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["data_parallel_size"]) -> MetaOapg.properties.data_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_balance_method"]) -> MetaOapg.properties.load_balance_method: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["expert_parallel_size"]) -> MetaOapg.properties.expert_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["dist_init_addr"]) -> MetaOapg.properties.dist_init_addr: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nnodes"]) -> MetaOapg.properties.nnodes: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["node_rank"]) -> MetaOapg.properties.node_rank: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["json_model_override_args"]) -> MetaOapg.properties.json_model_override_args: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["lora_paths"]) -> MetaOapg.properties.lora_paths: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_loras_per_batch"]) -> MetaOapg.properties.max_loras_per_batch: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["attention_backend"]) -> MetaOapg.properties.attention_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["sampling_backend"]) -> MetaOapg.properties.sampling_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["grammar_backend"]) -> MetaOapg.properties.grammar_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_algorithm"]) -> MetaOapg.properties.speculative_algorithm: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_draft_model_path"]) -> MetaOapg.properties.speculative_draft_model_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_num_steps"]) -> MetaOapg.properties.speculative_num_steps: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_num_draft_tokens"]) -> MetaOapg.properties.speculative_num_draft_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_eagle_topk"]) -> MetaOapg.properties.speculative_eagle_topk: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_double_sparsity"]) -> MetaOapg.properties.enable_double_sparsity: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_channel_config_path"]) -> MetaOapg.properties.ds_channel_config_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_heavy_channel_num"]) -> MetaOapg.properties.ds_heavy_channel_num: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_heavy_token_num"]) -> MetaOapg.properties.ds_heavy_token_num: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_heavy_channel_type"]) -> MetaOapg.properties.ds_heavy_channel_type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_sparse_decode_threshold"]) -> MetaOapg.properties.ds_sparse_decode_threshold: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_radix_cache"]) -> MetaOapg.properties.disable_radix_cache: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_jump_forward"]) -> MetaOapg.properties.disable_jump_forward: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_cuda_graph"]) -> MetaOapg.properties.disable_cuda_graph: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_cuda_graph_padding"]) -> MetaOapg.properties.disable_cuda_graph_padding: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_outlines_disk_cache"]) -> MetaOapg.properties.disable_outlines_disk_cache: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_custom_all_reduce"]) -> MetaOapg.properties.disable_custom_all_reduce: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_mla"]) -> MetaOapg.properties.disable_mla: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_overlap_schedule"]) -> MetaOapg.properties.disable_overlap_schedule: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_mixed_chunk"]) -> MetaOapg.properties.enable_mixed_chunk: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_dp_attention"]) -> MetaOapg.properties.enable_dp_attention: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_ep_moe"]) -> MetaOapg.properties.enable_ep_moe: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_torch_compile"]) -> MetaOapg.properties.enable_torch_compile: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["torch_compile_max_bs"]) -> MetaOapg.properties.torch_compile_max_bs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cuda_graph_max_bs"]) -> MetaOapg.properties.cuda_graph_max_bs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cuda_graph_bs"]) -> MetaOapg.properties.cuda_graph_bs: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["torchao_config"]) -> MetaOapg.properties.torchao_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_nan_detection"]) -> MetaOapg.properties.enable_nan_detection: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_p2p_check"]) -> MetaOapg.properties.enable_p2p_check: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_attention_reduce_in_fp32"]) -> MetaOapg.properties.triton_attention_reduce_in_fp32: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_attention_num_kv_splits"]) -> MetaOapg.properties.triton_attention_num_kv_splits: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_continuous_decode_steps"]) -> MetaOapg.properties.num_continuous_decode_steps: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["delete_ckpt_after_loading"]) -> MetaOapg.properties.delete_ckpt_after_loading: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_memory_saver"]) -> MetaOapg.properties.enable_memory_saver: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["allow_auto_truncate"]) -> MetaOapg.properties.allow_auto_truncate: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_custom_logit_processor"]) -> MetaOapg.properties.enable_custom_logit_processor: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["huggingface_repo"]) -> MetaOapg.properties.huggingface_repo: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "trust_remote_code", "tp_size", "skip_tokenizer_init", "load_format", "dtype", "kv_cache_dtype", "quantization_param_path", "quantization", "context_length", "device", "served_model_name", "chat_template", "is_embedding", "revision", "mem_fraction_static", "max_running_requests", "max_total_tokens", "chunked_prefill_size", "max_prefill_tokens", "schedule_policy", "schedule_conservativeness", "cpu_offload_gb", "prefill_only_one_req", "stream_interval", "random_seed", "constrained_json_whitespace_pattern", "watchdog_timeout", "download_dir", "base_gpu_id", "log_level", "log_level_http", "log_requests", "show_time_cost", "enable_metrics", "decode_log_interval", "api_key", "file_storage_pth", "enable_cache_report", "data_parallel_size", "load_balance_method", "expert_parallel_size", "dist_init_addr", "nnodes", "node_rank", "json_model_override_args", "lora_paths", "max_loras_per_batch", "attention_backend", "sampling_backend", "grammar_backend", "speculative_algorithm", "speculative_draft_model_path", "speculative_num_steps", "speculative_num_draft_tokens", "speculative_eagle_topk", "enable_double_sparsity", "ds_channel_config_path", "ds_heavy_channel_num", "ds_heavy_token_num", "ds_heavy_channel_type", 
"ds_sparse_decode_threshold", "disable_radix_cache", "disable_jump_forward", "disable_cuda_graph", "disable_cuda_graph_padding", "disable_outlines_disk_cache", "disable_custom_all_reduce", "disable_mla", "disable_overlap_schedule", "enable_mixed_chunk", "enable_dp_attention", "enable_ep_moe", "enable_torch_compile", "torch_compile_max_bs", "cuda_graph_max_bs", "cuda_graph_bs", "torchao_config", "enable_nan_detection", "enable_p2p_check", "triton_attention_reduce_in_fp32", "triton_attention_num_kv_splits", "num_continuous_decode_steps", "delete_ckpt_after_loading", "enable_memory_saver", "allow_auto_truncate", "enable_custom_logit_processor", "tool_call_parser", "huggingface_repo", "inference_framework", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tp_size"]) -> typing.Union[MetaOapg.properties.tp_size, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["kv_cache_dtype"]) -> typing.Union[MetaOapg.properties.kv_cache_dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["context_length"]) -> typing.Union[MetaOapg.properties.context_length, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["device"]) -> typing.Union[MetaOapg.properties.device, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["is_embedding"]) -> typing.Union[MetaOapg.properties.is_embedding, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["mem_fraction_static"]) -> typing.Union[MetaOapg.properties.mem_fraction_static, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_running_requests"]) -> typing.Union[MetaOapg.properties.max_running_requests, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_total_tokens"]) -> typing.Union[MetaOapg.properties.max_total_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chunked_prefill_size"]) -> typing.Union[MetaOapg.properties.chunked_prefill_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_prefill_tokens"]) -> typing.Union[MetaOapg.properties.max_prefill_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schedule_policy"]) -> typing.Union[MetaOapg.properties.schedule_policy, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schedule_conservativeness"]) -> typing.Union[MetaOapg.properties.schedule_conservativeness, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpu_offload_gb"]) -> typing.Union[MetaOapg.properties.cpu_offload_gb, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prefill_only_one_req"]) -> typing.Union[MetaOapg.properties.prefill_only_one_req, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream_interval"]) -> typing.Union[MetaOapg.properties.stream_interval, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["random_seed"]) -> typing.Union[MetaOapg.properties.random_seed, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["constrained_json_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.constrained_json_whitespace_pattern, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["watchdog_timeout"]) -> typing.Union[MetaOapg.properties.watchdog_timeout, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["download_dir"]) -> typing.Union[MetaOapg.properties.download_dir, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["base_gpu_id"]) -> typing.Union[MetaOapg.properties.base_gpu_id, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["log_level"]) -> typing.Union[MetaOapg.properties.log_level, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["log_level_http"]) -> typing.Union[MetaOapg.properties.log_level_http, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["log_requests"]) -> typing.Union[MetaOapg.properties.log_requests, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["show_time_cost"]) -> typing.Union[MetaOapg.properties.show_time_cost, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_metrics"]) -> typing.Union[MetaOapg.properties.enable_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["decode_log_interval"]) -> typing.Union[MetaOapg.properties.decode_log_interval, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["api_key"]) -> typing.Union[MetaOapg.properties.api_key, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["file_storage_pth"]) -> typing.Union[MetaOapg.properties.file_storage_pth, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_cache_report"]) -> typing.Union[MetaOapg.properties.enable_cache_report, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["data_parallel_size"]) -> typing.Union[MetaOapg.properties.data_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_balance_method"]) -> typing.Union[MetaOapg.properties.load_balance_method, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["expert_parallel_size"]) -> typing.Union[MetaOapg.properties.expert_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["dist_init_addr"]) -> typing.Union[MetaOapg.properties.dist_init_addr, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nnodes"]) -> typing.Union[MetaOapg.properties.nnodes, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["node_rank"]) -> typing.Union[MetaOapg.properties.node_rank, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["json_model_override_args"]) -> typing.Union[MetaOapg.properties.json_model_override_args, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["lora_paths"]) -> typing.Union[MetaOapg.properties.lora_paths, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_loras_per_batch"]) -> typing.Union[MetaOapg.properties.max_loras_per_batch, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["attention_backend"]) -> typing.Union[MetaOapg.properties.attention_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["sampling_backend"]) -> typing.Union[MetaOapg.properties.sampling_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["grammar_backend"]) -> typing.Union[MetaOapg.properties.grammar_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_algorithm"]) -> typing.Union[MetaOapg.properties.speculative_algorithm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_draft_model_path"]) -> typing.Union[MetaOapg.properties.speculative_draft_model_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_num_steps"]) -> typing.Union[MetaOapg.properties.speculative_num_steps, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_num_draft_tokens"]) -> typing.Union[MetaOapg.properties.speculative_num_draft_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_eagle_topk"]) -> typing.Union[MetaOapg.properties.speculative_eagle_topk, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_double_sparsity"]) -> typing.Union[MetaOapg.properties.enable_double_sparsity, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_channel_config_path"]) -> typing.Union[MetaOapg.properties.ds_channel_config_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_channel_num"]) -> typing.Union[MetaOapg.properties.ds_heavy_channel_num, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_token_num"]) -> typing.Union[MetaOapg.properties.ds_heavy_token_num, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_channel_type"]) -> typing.Union[MetaOapg.properties.ds_heavy_channel_type, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_sparse_decode_threshold"]) -> typing.Union[MetaOapg.properties.ds_sparse_decode_threshold, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_radix_cache"]) -> typing.Union[MetaOapg.properties.disable_radix_cache, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_jump_forward"]) -> typing.Union[MetaOapg.properties.disable_jump_forward, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_cuda_graph"]) -> typing.Union[MetaOapg.properties.disable_cuda_graph, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_cuda_graph_padding"]) -> typing.Union[MetaOapg.properties.disable_cuda_graph_padding, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_outlines_disk_cache"]) -> typing.Union[MetaOapg.properties.disable_outlines_disk_cache, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_custom_all_reduce"]) -> typing.Union[MetaOapg.properties.disable_custom_all_reduce, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_mla"]) -> typing.Union[MetaOapg.properties.disable_mla, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_overlap_schedule"]) -> typing.Union[MetaOapg.properties.disable_overlap_schedule, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_mixed_chunk"]) -> typing.Union[MetaOapg.properties.enable_mixed_chunk, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_dp_attention"]) -> typing.Union[MetaOapg.properties.enable_dp_attention, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_ep_moe"]) -> typing.Union[MetaOapg.properties.enable_ep_moe, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_torch_compile"]) -> typing.Union[MetaOapg.properties.enable_torch_compile, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["torch_compile_max_bs"]) -> typing.Union[MetaOapg.properties.torch_compile_max_bs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cuda_graph_max_bs"]) -> typing.Union[MetaOapg.properties.cuda_graph_max_bs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cuda_graph_bs"]) -> typing.Union[MetaOapg.properties.cuda_graph_bs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["torchao_config"]) -> typing.Union[MetaOapg.properties.torchao_config, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_nan_detection"]) -> typing.Union[MetaOapg.properties.enable_nan_detection, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_p2p_check"]) -> typing.Union[MetaOapg.properties.enable_p2p_check, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_attention_reduce_in_fp32"]) -> typing.Union[MetaOapg.properties.triton_attention_reduce_in_fp32, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_attention_num_kv_splits"]) -> typing.Union[MetaOapg.properties.triton_attention_num_kv_splits, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_continuous_decode_steps"]) -> typing.Union[MetaOapg.properties.num_continuous_decode_steps, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["delete_ckpt_after_loading"]) -> typing.Union[MetaOapg.properties.delete_ckpt_after_loading, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_memory_saver"]) -> typing.Union[MetaOapg.properties.enable_memory_saver, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["allow_auto_truncate"]) -> typing.Union[MetaOapg.properties.allow_auto_truncate, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_custom_logit_processor"]) -> typing.Union[MetaOapg.properties.enable_custom_logit_processor, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["huggingface_repo"]) -> typing.Union[MetaOapg.properties.huggingface_repo, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "trust_remote_code", "tp_size", "skip_tokenizer_init", "load_format", "dtype", "kv_cache_dtype", "quantization_param_path", "quantization", "context_length", "device", "served_model_name", "chat_template", "is_embedding", "revision", "mem_fraction_static", "max_running_requests", "max_total_tokens", "chunked_prefill_size", "max_prefill_tokens", "schedule_policy", "schedule_conservativeness", "cpu_offload_gb", "prefill_only_one_req", "stream_interval", "random_seed", "constrained_json_whitespace_pattern", "watchdog_timeout", "download_dir", "base_gpu_id", "log_level", "log_level_http", "log_requests", "show_time_cost", "enable_metrics", "decode_log_interval", "api_key", "file_storage_pth", "enable_cache_report", "data_parallel_size", "load_balance_method", "expert_parallel_size", "dist_init_addr", "nnodes", "node_rank", "json_model_override_args", "lora_paths", "max_loras_per_batch", "attention_backend", "sampling_backend", "grammar_backend", "speculative_algorithm", 
"speculative_draft_model_path", "speculative_num_steps", "speculative_num_draft_tokens", "speculative_eagle_topk", "enable_double_sparsity", "ds_channel_config_path", "ds_heavy_channel_num", "ds_heavy_token_num", "ds_heavy_channel_type", "ds_sparse_decode_threshold", "disable_radix_cache", "disable_jump_forward", "disable_cuda_graph", "disable_cuda_graph_padding", "disable_outlines_disk_cache", "disable_custom_all_reduce", "disable_mla", "disable_overlap_schedule", "enable_mixed_chunk", "enable_dp_attention", "enable_ep_moe", "enable_torch_compile", "torch_compile_max_bs", "cuda_graph_max_bs", "cuda_graph_bs", "torchao_config", "enable_nan_detection", "enable_p2p_check", "triton_attention_reduce_in_fp32", "triton_attention_num_kv_splits", "num_continuous_decode_steps", "delete_ckpt_after_loading", "enable_memory_saver", "allow_auto_truncate", "enable_custom_logit_processor", "tool_call_parser", "huggingface_repo", "inference_framework", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], - model_name: typing.Union[MetaOapg.properties.model_name, str, ], - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], - name: typing.Union[MetaOapg.properties.name, str, ], - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - source: 
typing.Union['LLMSource', schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, - trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, - tp_size: typing.Union[MetaOapg.properties.tp_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, - load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, - dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, - kv_cache_dtype: typing.Union[MetaOapg.properties.kv_cache_dtype, None, str, schemas.Unset] = schemas.unset, - quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, - quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, - context_length: typing.Union[MetaOapg.properties.context_length, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - device: typing.Union[MetaOapg.properties.device, None, str, schemas.Unset] = schemas.unset, - served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, - chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, - is_embedding: typing.Union[MetaOapg.properties.is_embedding, None, bool, schemas.Unset] = schemas.unset, - revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = schemas.unset, - mem_fraction_static: 
typing.Union[MetaOapg.properties.mem_fraction_static, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - max_running_requests: typing.Union[MetaOapg.properties.max_running_requests, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_total_tokens: typing.Union[MetaOapg.properties.max_total_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - chunked_prefill_size: typing.Union[MetaOapg.properties.chunked_prefill_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_prefill_tokens: typing.Union[MetaOapg.properties.max_prefill_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - schedule_policy: typing.Union[MetaOapg.properties.schedule_policy, None, str, schemas.Unset] = schemas.unset, - schedule_conservativeness: typing.Union[MetaOapg.properties.schedule_conservativeness, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - cpu_offload_gb: typing.Union[MetaOapg.properties.cpu_offload_gb, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - prefill_only_one_req: typing.Union[MetaOapg.properties.prefill_only_one_req, None, bool, schemas.Unset] = schemas.unset, - stream_interval: typing.Union[MetaOapg.properties.stream_interval, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - random_seed: typing.Union[MetaOapg.properties.random_seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - constrained_json_whitespace_pattern: typing.Union[MetaOapg.properties.constrained_json_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, - watchdog_timeout: typing.Union[MetaOapg.properties.watchdog_timeout, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - download_dir: typing.Union[MetaOapg.properties.download_dir, None, str, schemas.Unset] = schemas.unset, - base_gpu_id: typing.Union[MetaOapg.properties.base_gpu_id, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - log_level: 
typing.Union[MetaOapg.properties.log_level, None, str, schemas.Unset] = schemas.unset, - log_level_http: typing.Union[MetaOapg.properties.log_level_http, None, str, schemas.Unset] = schemas.unset, - log_requests: typing.Union[MetaOapg.properties.log_requests, None, bool, schemas.Unset] = schemas.unset, - show_time_cost: typing.Union[MetaOapg.properties.show_time_cost, None, bool, schemas.Unset] = schemas.unset, - enable_metrics: typing.Union[MetaOapg.properties.enable_metrics, None, bool, schemas.Unset] = schemas.unset, - decode_log_interval: typing.Union[MetaOapg.properties.decode_log_interval, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - api_key: typing.Union[MetaOapg.properties.api_key, None, str, schemas.Unset] = schemas.unset, - file_storage_pth: typing.Union[MetaOapg.properties.file_storage_pth, None, str, schemas.Unset] = schemas.unset, - enable_cache_report: typing.Union[MetaOapg.properties.enable_cache_report, None, bool, schemas.Unset] = schemas.unset, - data_parallel_size: typing.Union[MetaOapg.properties.data_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - load_balance_method: typing.Union[MetaOapg.properties.load_balance_method, None, str, schemas.Unset] = schemas.unset, - expert_parallel_size: typing.Union[MetaOapg.properties.expert_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - dist_init_addr: typing.Union[MetaOapg.properties.dist_init_addr, None, str, schemas.Unset] = schemas.unset, - nnodes: typing.Union[MetaOapg.properties.nnodes, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - node_rank: typing.Union[MetaOapg.properties.node_rank, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - json_model_override_args: typing.Union[MetaOapg.properties.json_model_override_args, None, str, schemas.Unset] = schemas.unset, - lora_paths: typing.Union[MetaOapg.properties.lora_paths, list, tuple, None, schemas.Unset] = schemas.unset, - max_loras_per_batch: 
typing.Union[MetaOapg.properties.max_loras_per_batch, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - attention_backend: typing.Union[MetaOapg.properties.attention_backend, None, str, schemas.Unset] = schemas.unset, - sampling_backend: typing.Union[MetaOapg.properties.sampling_backend, None, str, schemas.Unset] = schemas.unset, - grammar_backend: typing.Union[MetaOapg.properties.grammar_backend, None, str, schemas.Unset] = schemas.unset, - speculative_algorithm: typing.Union[MetaOapg.properties.speculative_algorithm, None, str, schemas.Unset] = schemas.unset, - speculative_draft_model_path: typing.Union[MetaOapg.properties.speculative_draft_model_path, None, str, schemas.Unset] = schemas.unset, - speculative_num_steps: typing.Union[MetaOapg.properties.speculative_num_steps, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - speculative_num_draft_tokens: typing.Union[MetaOapg.properties.speculative_num_draft_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - speculative_eagle_topk: typing.Union[MetaOapg.properties.speculative_eagle_topk, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enable_double_sparsity: typing.Union[MetaOapg.properties.enable_double_sparsity, None, bool, schemas.Unset] = schemas.unset, - ds_channel_config_path: typing.Union[MetaOapg.properties.ds_channel_config_path, None, str, schemas.Unset] = schemas.unset, - ds_heavy_channel_num: typing.Union[MetaOapg.properties.ds_heavy_channel_num, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - ds_heavy_token_num: typing.Union[MetaOapg.properties.ds_heavy_token_num, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - ds_heavy_channel_type: typing.Union[MetaOapg.properties.ds_heavy_channel_type, None, str, schemas.Unset] = schemas.unset, - ds_sparse_decode_threshold: typing.Union[MetaOapg.properties.ds_sparse_decode_threshold, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - disable_radix_cache: 
typing.Union[MetaOapg.properties.disable_radix_cache, None, bool, schemas.Unset] = schemas.unset, - disable_jump_forward: typing.Union[MetaOapg.properties.disable_jump_forward, None, bool, schemas.Unset] = schemas.unset, - disable_cuda_graph: typing.Union[MetaOapg.properties.disable_cuda_graph, None, bool, schemas.Unset] = schemas.unset, - disable_cuda_graph_padding: typing.Union[MetaOapg.properties.disable_cuda_graph_padding, None, bool, schemas.Unset] = schemas.unset, - disable_outlines_disk_cache: typing.Union[MetaOapg.properties.disable_outlines_disk_cache, None, bool, schemas.Unset] = schemas.unset, - disable_custom_all_reduce: typing.Union[MetaOapg.properties.disable_custom_all_reduce, None, bool, schemas.Unset] = schemas.unset, - disable_mla: typing.Union[MetaOapg.properties.disable_mla, None, bool, schemas.Unset] = schemas.unset, - disable_overlap_schedule: typing.Union[MetaOapg.properties.disable_overlap_schedule, None, bool, schemas.Unset] = schemas.unset, - enable_mixed_chunk: typing.Union[MetaOapg.properties.enable_mixed_chunk, None, bool, schemas.Unset] = schemas.unset, - enable_dp_attention: typing.Union[MetaOapg.properties.enable_dp_attention, None, bool, schemas.Unset] = schemas.unset, - enable_ep_moe: typing.Union[MetaOapg.properties.enable_ep_moe, None, bool, schemas.Unset] = schemas.unset, - enable_torch_compile: typing.Union[MetaOapg.properties.enable_torch_compile, None, bool, schemas.Unset] = schemas.unset, - torch_compile_max_bs: typing.Union[MetaOapg.properties.torch_compile_max_bs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - cuda_graph_max_bs: typing.Union[MetaOapg.properties.cuda_graph_max_bs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - cuda_graph_bs: typing.Union[MetaOapg.properties.cuda_graph_bs, list, tuple, None, schemas.Unset] = schemas.unset, - torchao_config: typing.Union[MetaOapg.properties.torchao_config, None, str, schemas.Unset] = schemas.unset, - enable_nan_detection: 
typing.Union[MetaOapg.properties.enable_nan_detection, None, bool, schemas.Unset] = schemas.unset, - enable_p2p_check: typing.Union[MetaOapg.properties.enable_p2p_check, None, bool, schemas.Unset] = schemas.unset, - triton_attention_reduce_in_fp32: typing.Union[MetaOapg.properties.triton_attention_reduce_in_fp32, None, bool, schemas.Unset] = schemas.unset, - triton_attention_num_kv_splits: typing.Union[MetaOapg.properties.triton_attention_num_kv_splits, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - num_continuous_decode_steps: typing.Union[MetaOapg.properties.num_continuous_decode_steps, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - delete_ckpt_after_loading: typing.Union[MetaOapg.properties.delete_ckpt_after_loading, None, bool, schemas.Unset] = schemas.unset, - enable_memory_saver: typing.Union[MetaOapg.properties.enable_memory_saver, None, bool, schemas.Unset] = schemas.unset, - allow_auto_truncate: typing.Union[MetaOapg.properties.allow_auto_truncate, None, bool, schemas.Unset] = schemas.unset, - enable_custom_logit_processor: typing.Union[MetaOapg.properties.enable_custom_logit_processor, None, bool, schemas.Unset] = schemas.unset, - tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, - huggingface_repo: typing.Union[MetaOapg.properties.huggingface_repo, None, str, schemas.Unset] = schemas.unset, - inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateSGLangModelEndpointRequest': - return super().__new__( - cls, - *_args, - metadata=metadata, - model_name=model_name, - max_workers=max_workers, - min_workers=min_workers, - name=name, - per_worker=per_worker, - labels=labels, 
- quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - source=source, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - endpoint_type=endpoint_type, - trust_remote_code=trust_remote_code, - tp_size=tp_size, - skip_tokenizer_init=skip_tokenizer_init, - load_format=load_format, - dtype=dtype, - kv_cache_dtype=kv_cache_dtype, - quantization_param_path=quantization_param_path, - quantization=quantization, - context_length=context_length, - device=device, - served_model_name=served_model_name, - chat_template=chat_template, - is_embedding=is_embedding, - revision=revision, - mem_fraction_static=mem_fraction_static, - max_running_requests=max_running_requests, - max_total_tokens=max_total_tokens, - chunked_prefill_size=chunked_prefill_size, - max_prefill_tokens=max_prefill_tokens, - schedule_policy=schedule_policy, - schedule_conservativeness=schedule_conservativeness, - cpu_offload_gb=cpu_offload_gb, - prefill_only_one_req=prefill_only_one_req, - stream_interval=stream_interval, - random_seed=random_seed, - constrained_json_whitespace_pattern=constrained_json_whitespace_pattern, - watchdog_timeout=watchdog_timeout, - download_dir=download_dir, - base_gpu_id=base_gpu_id, - log_level=log_level, - log_level_http=log_level_http, - log_requests=log_requests, - show_time_cost=show_time_cost, - enable_metrics=enable_metrics, - decode_log_interval=decode_log_interval, - api_key=api_key, - file_storage_pth=file_storage_pth, - 
enable_cache_report=enable_cache_report, - data_parallel_size=data_parallel_size, - load_balance_method=load_balance_method, - expert_parallel_size=expert_parallel_size, - dist_init_addr=dist_init_addr, - nnodes=nnodes, - node_rank=node_rank, - json_model_override_args=json_model_override_args, - lora_paths=lora_paths, - max_loras_per_batch=max_loras_per_batch, - attention_backend=attention_backend, - sampling_backend=sampling_backend, - grammar_backend=grammar_backend, - speculative_algorithm=speculative_algorithm, - speculative_draft_model_path=speculative_draft_model_path, - speculative_num_steps=speculative_num_steps, - speculative_num_draft_tokens=speculative_num_draft_tokens, - speculative_eagle_topk=speculative_eagle_topk, - enable_double_sparsity=enable_double_sparsity, - ds_channel_config_path=ds_channel_config_path, - ds_heavy_channel_num=ds_heavy_channel_num, - ds_heavy_token_num=ds_heavy_token_num, - ds_heavy_channel_type=ds_heavy_channel_type, - ds_sparse_decode_threshold=ds_sparse_decode_threshold, - disable_radix_cache=disable_radix_cache, - disable_jump_forward=disable_jump_forward, - disable_cuda_graph=disable_cuda_graph, - disable_cuda_graph_padding=disable_cuda_graph_padding, - disable_outlines_disk_cache=disable_outlines_disk_cache, - disable_custom_all_reduce=disable_custom_all_reduce, - disable_mla=disable_mla, - disable_overlap_schedule=disable_overlap_schedule, - enable_mixed_chunk=enable_mixed_chunk, - enable_dp_attention=enable_dp_attention, - enable_ep_moe=enable_ep_moe, - enable_torch_compile=enable_torch_compile, - torch_compile_max_bs=torch_compile_max_bs, - cuda_graph_max_bs=cuda_graph_max_bs, - cuda_graph_bs=cuda_graph_bs, - torchao_config=torchao_config, - enable_nan_detection=enable_nan_detection, - enable_p2p_check=enable_p2p_check, - triton_attention_reduce_in_fp32=triton_attention_reduce_in_fp32, - triton_attention_num_kv_splits=triton_attention_num_kv_splits, - num_continuous_decode_steps=num_continuous_decode_steps, - 
delete_ckpt_after_loading=delete_ckpt_after_loading, - enable_memory_saver=enable_memory_saver, - allow_auto_truncate=allow_auto_truncate, - enable_custom_logit_processor=enable_custom_logit_processor, - tool_call_parser=tool_call_parser, - huggingface_repo=huggingface_repo, - inference_framework=inference_framework, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.model_endpoint_type import ModelEndpointType -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_tensor_rtllm_model_endpoint_request.py b/launch/api_client/model/create_tensor_rtllm_model_endpoint_request.py deleted file mode 100644 index c1fa53b7..00000000 --- a/launch/api_client/model/create_tensor_rtllm_model_endpoint_request.py +++ /dev/null @@ -1,842 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateTensorRTLLMModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "metadata", - "model_name", - "max_workers", - "min_workers", - "name", - "per_worker", - "labels", - } - - class properties: - name = schemas.StrSchema - model_name = schemas.StrSchema - - - class metadata( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - min_workers = schemas.IntSchema - max_workers = schemas.IntSchema - per_worker = schemas.IntSchema - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return 
Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - inference_framework_image_tag = schemas.StrSchema - num_shards = schemas.IntSchema - - @staticmethod - def endpoint_type() -> typing.Type['ModelEndpointType']: - return ModelEndpointType - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "tensorrt_llm": "TENSORRT_LLM", - } - - @schemas.classproperty - def TENSORRT_LLM(cls): - return cls("tensorrt_llm") - __annotations__ = { - "name": name, - "model_name": model_name, - "metadata": metadata, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - 
"chat_template_override": chat_template_override, - "enable_startup_metrics": enable_startup_metrics, - "source": source, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "endpoint_type": endpoint_type, - "inference_framework": inference_framework, - } - - metadata: MetaOapg.properties.metadata - model_name: MetaOapg.properties.model_name - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - name: MetaOapg.properties.name - per_worker: MetaOapg.properties.per_worker - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], - model_name: typing.Union[MetaOapg.properties.model_name, str, ], - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], - name: typing.Union[MetaOapg.properties.name, str, ], - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: 
typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = 
schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, - inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateTensorRTLLMModelEndpointRequest': - return super().__new__( - cls, - *_args, - metadata=metadata, - model_name=model_name, - max_workers=max_workers, - min_workers=min_workers, - name=name, - per_worker=per_worker, - labels=labels, - quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - source=source, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - endpoint_type=endpoint_type, - inference_framework=inference_framework, - 
_configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.model_endpoint_type import ModelEndpointType -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_text_generation_inference_model_endpoint_request.py b/launch/api_client/model/create_text_generation_inference_model_endpoint_request.py deleted file mode 100644 index b6aa42de..00000000 --- a/launch/api_client/model/create_text_generation_inference_model_endpoint_request.py +++ /dev/null @@ -1,842 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateTextGenerationInferenceModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "metadata", - "model_name", - "max_workers", - "min_workers", - "name", - "per_worker", - "labels", - } - - class properties: - name = schemas.StrSchema - model_name = schemas.StrSchema - - - class metadata( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - min_workers = schemas.IntSchema - max_workers = schemas.IntSchema - per_worker = schemas.IntSchema - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return 
Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - inference_framework_image_tag = schemas.StrSchema - num_shards = schemas.IntSchema - - @staticmethod - def endpoint_type() -> typing.Type['ModelEndpointType']: - return ModelEndpointType - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "text_generation_inference": "TEXT_GENERATION_INFERENCE", - } - - @schemas.classproperty - def TEXT_GENERATION_INFERENCE(cls): - return cls("text_generation_inference") - __annotations__ = { - "name": name, - "model_name": model_name, - "metadata": metadata, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - 
"public_inference": public_inference, - "chat_template_override": chat_template_override, - "enable_startup_metrics": enable_startup_metrics, - "source": source, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "endpoint_type": endpoint_type, - "inference_framework": inference_framework, - } - - metadata: MetaOapg.properties.metadata - model_name: MetaOapg.properties.model_name - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - name: MetaOapg.properties.name - per_worker: MetaOapg.properties.per_worker - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], - model_name: typing.Union[MetaOapg.properties.model_name, str, ], - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], - name: typing.Union[MetaOapg.properties.name, str, ], - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: 
typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = 
schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, - inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateTextGenerationInferenceModelEndpointRequest': - return super().__new__( - cls, - *_args, - metadata=metadata, - model_name=model_name, - max_workers=max_workers, - min_workers=min_workers, - name=name, - per_worker=per_worker, - labels=labels, - quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - source=source, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - endpoint_type=endpoint_type, - 
inference_framework=inference_framework, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.model_endpoint_type import ModelEndpointType -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_trigger_v1_request.py b/launch/api_client/model/create_trigger_v1_request.py deleted file mode 100644 index 93e7db58..00000000 --- a/launch/api_client/model/create_trigger_v1_request.py +++ /dev/null @@ -1,191 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateTriggerV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "cron_schedule", - "bundle_id", - "name", - } - - class properties: - name = schemas.StrSchema - cron_schedule = schemas.StrSchema - bundle_id = schemas.StrSchema - - - class default_job_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'default_job_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_job_metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'default_job_metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - 
__annotations__ = { - "name": name, - "cron_schedule": cron_schedule, - "bundle_id": bundle_id, - "default_job_config": default_job_config, - "default_job_metadata": default_job_metadata, - } - - cron_schedule: MetaOapg.properties.cron_schedule - bundle_id: MetaOapg.properties.bundle_id - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["bundle_id"]) -> MetaOapg.properties.bundle_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_job_config"]) -> MetaOapg.properties.default_job_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_job_metadata"]) -> MetaOapg.properties.default_job_metadata: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "cron_schedule", "bundle_id", "default_job_config", "default_job_metadata", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["bundle_id"]) -> MetaOapg.properties.bundle_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_job_config"]) -> typing.Union[MetaOapg.properties.default_job_config, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_job_metadata"]) -> typing.Union[MetaOapg.properties.default_job_metadata, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "cron_schedule", "bundle_id", "default_job_config", "default_job_metadata", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - cron_schedule: typing.Union[MetaOapg.properties.cron_schedule, str, ], - bundle_id: typing.Union[MetaOapg.properties.bundle_id, str, ], - name: typing.Union[MetaOapg.properties.name, str, ], - default_job_config: typing.Union[MetaOapg.properties.default_job_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_job_metadata: typing.Union[MetaOapg.properties.default_job_metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateTriggerV1Request': - return super().__new__( - cls, - *_args, - cron_schedule=cron_schedule, - bundle_id=bundle_id, - name=name, - default_job_config=default_job_config, - default_job_metadata=default_job_metadata, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_trigger_v1_response.py b/launch/api_client/model/create_trigger_v1_response.py deleted file mode 100644 index 7ce0aad3..00000000 --- a/launch/api_client/model/create_trigger_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 
- - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateTriggerV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "trigger_id", - } - - class properties: - trigger_id = schemas.StrSchema - __annotations__ = { - "trigger_id": trigger_id, - } - - trigger_id: MetaOapg.properties.trigger_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["trigger_id"]) -> MetaOapg.properties.trigger_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["trigger_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["trigger_id"]) -> MetaOapg.properties.trigger_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["trigger_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - trigger_id: typing.Union[MetaOapg.properties.trigger_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateTriggerV1Response': - return super().__new__( - cls, - *_args, - trigger_id=trigger_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_vllm_model_endpoint_request.py b/launch/api_client/model/create_vllm_model_endpoint_request.py deleted file mode 100644 index c3d4cfbc..00000000 --- a/launch/api_client/model/create_vllm_model_endpoint_request.py +++ /dev/null @@ -1,1983 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CreateVLLMModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "metadata", - "model_name", - "max_workers", - "min_workers", - "name", - "per_worker", - "labels", - } - - class properties: - name = schemas.StrSchema - model_name = schemas.StrSchema - - - class metadata( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - min_workers = schemas.IntSchema - max_workers = schemas.IntSchema - per_worker = schemas.IntSchema - - - class labels( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return 
Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - inference_framework_image_tag = schemas.StrSchema - num_shards = schemas.IntSchema - - @staticmethod - def endpoint_type() -> typing.Type['ModelEndpointType']: - return ModelEndpointType - - - class max_gpu_memory_utilization( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_gpu_memory_utilization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class attention_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'attention_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_model_len( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'max_model_len': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_num_seqs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_num_seqs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enforce_eager( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enforce_eager': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class trust_remote_code( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'trust_remote_code': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class pipeline_parallel_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'pipeline_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tensor_parallel_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tensor_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class 
quantization( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_log_requests( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_log_requests': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tool_call_parser( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tool_call_parser': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_auto_tool_choice( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_auto_tool_choice': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class load_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 
'load_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class config_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'config_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokenizer_mode( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer_mode': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class limit_mm_per_prompt( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'limit_mm_per_prompt': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_num_batched_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_num_batched_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokenizer( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: 
typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class seed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'seed': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class code_revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'code_revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class rope_scaling( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, 
list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'rope_scaling': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class tokenizer_revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer_revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization_param_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization_param_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_seq_len_to_capture( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_seq_len_to_capture': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_sliding_window( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_sliding_window': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class skip_tokenizer_init( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_tokenizer_init': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - 
class served_model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'served_model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class override_neuron_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'override_neuron_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class mm_processor_kwargs( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: 
typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'mm_processor_kwargs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class block_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'block_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class gpu_memory_utilization( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpu_memory_utilization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class swap_space( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'swap_space': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cache_dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cache_dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_gpu_blocks_override( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - 
_configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_gpu_blocks_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_prefix_caching( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_prefix_caching': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "vllm": "VLLM", - } - - @schemas.classproperty - def VLLM(cls): - return cls("vllm") - __annotations__ = { - "name": name, - "model_name": model_name, - "metadata": metadata, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - "chat_template_override": chat_template_override, - "enable_startup_metrics": enable_startup_metrics, - "source": source, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "endpoint_type": endpoint_type, - "max_gpu_memory_utilization": max_gpu_memory_utilization, - "attention_backend": attention_backend, - "max_model_len": max_model_len, - "max_num_seqs": max_num_seqs, - "enforce_eager": enforce_eager, - "trust_remote_code": trust_remote_code, - "pipeline_parallel_size": 
pipeline_parallel_size, - "tensor_parallel_size": tensor_parallel_size, - "quantization": quantization, - "disable_log_requests": disable_log_requests, - "chat_template": chat_template, - "tool_call_parser": tool_call_parser, - "enable_auto_tool_choice": enable_auto_tool_choice, - "load_format": load_format, - "config_format": config_format, - "tokenizer_mode": tokenizer_mode, - "limit_mm_per_prompt": limit_mm_per_prompt, - "max_num_batched_tokens": max_num_batched_tokens, - "tokenizer": tokenizer, - "dtype": dtype, - "seed": seed, - "revision": revision, - "code_revision": code_revision, - "rope_scaling": rope_scaling, - "tokenizer_revision": tokenizer_revision, - "quantization_param_path": quantization_param_path, - "max_seq_len_to_capture": max_seq_len_to_capture, - "disable_sliding_window": disable_sliding_window, - "skip_tokenizer_init": skip_tokenizer_init, - "served_model_name": served_model_name, - "override_neuron_config": override_neuron_config, - "mm_processor_kwargs": mm_processor_kwargs, - "block_size": block_size, - "gpu_memory_utilization": gpu_memory_utilization, - "swap_space": swap_space, - "cache_dtype": cache_dtype, - "num_gpu_blocks_override": num_gpu_blocks_override, - "enable_prefix_caching": enable_prefix_caching, - "inference_framework": inference_framework, - } - - metadata: MetaOapg.properties.metadata - model_name: MetaOapg.properties.model_name - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - name: MetaOapg.properties.name - per_worker: MetaOapg.properties.per_worker - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_gpu_memory_utilization"]) -> MetaOapg.properties.max_gpu_memory_utilization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["attention_backend"]) -> MetaOapg.properties.attention_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_model_len"]) -> MetaOapg.properties.max_model_len: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_num_seqs"]) -> MetaOapg.properties.max_num_seqs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enforce_eager"]) -> MetaOapg.properties.enforce_eager: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> MetaOapg.properties.pipeline_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> MetaOapg.properties.tensor_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_log_requests"]) -> MetaOapg.properties.disable_log_requests: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> MetaOapg.properties.enable_auto_tool_choice: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["config_format"]) -> MetaOapg.properties.config_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer_mode"]) -> MetaOapg.properties.tokenizer_mode: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> MetaOapg.properties.limit_mm_per_prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> MetaOapg.properties.max_num_batched_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer"]) -> MetaOapg.properties.tokenizer: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["code_revision"]) -> MetaOapg.properties.code_revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["rope_scaling"]) -> MetaOapg.properties.rope_scaling: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer_revision"]) -> MetaOapg.properties.tokenizer_revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> MetaOapg.properties.max_seq_len_to_capture: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_sliding_window"]) -> MetaOapg.properties.disable_sliding_window: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["override_neuron_config"]) -> MetaOapg.properties.override_neuron_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> MetaOapg.properties.mm_processor_kwargs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["block_size"]) -> MetaOapg.properties.block_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> MetaOapg.properties.gpu_memory_utilization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["swap_space"]) -> MetaOapg.properties.swap_space: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cache_dtype"]) -> MetaOapg.properties.cache_dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> MetaOapg.properties.num_gpu_blocks_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> MetaOapg.properties.enable_prefix_caching: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "max_gpu_memory_utilization", "attention_backend", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "inference_framework", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.max_gpu_memory_utilization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["attention_backend"]) -> typing.Union[MetaOapg.properties.attention_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_model_len"]) -> typing.Union[MetaOapg.properties.max_model_len, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_num_seqs"]) -> typing.Union[MetaOapg.properties.max_num_seqs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enforce_eager"]) -> typing.Union[MetaOapg.properties.enforce_eager, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> typing.Union[MetaOapg.properties.pipeline_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> typing.Union[MetaOapg.properties.tensor_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_log_requests"]) -> typing.Union[MetaOapg.properties.disable_log_requests, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> typing.Union[MetaOapg.properties.enable_auto_tool_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["config_format"]) -> typing.Union[MetaOapg.properties.config_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_mode"]) -> typing.Union[MetaOapg.properties.tokenizer_mode, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> typing.Union[MetaOapg.properties.limit_mm_per_prompt, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> typing.Union[MetaOapg.properties.max_num_batched_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer"]) -> typing.Union[MetaOapg.properties.tokenizer, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["code_revision"]) -> typing.Union[MetaOapg.properties.code_revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["rope_scaling"]) -> typing.Union[MetaOapg.properties.rope_scaling, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_revision"]) -> typing.Union[MetaOapg.properties.tokenizer_revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> typing.Union[MetaOapg.properties.max_seq_len_to_capture, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_sliding_window"]) -> typing.Union[MetaOapg.properties.disable_sliding_window, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["override_neuron_config"]) -> typing.Union[MetaOapg.properties.override_neuron_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> typing.Union[MetaOapg.properties.mm_processor_kwargs, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["block_size"]) -> typing.Union[MetaOapg.properties.block_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.gpu_memory_utilization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["swap_space"]) -> typing.Union[MetaOapg.properties.swap_space, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cache_dtype"]) -> typing.Union[MetaOapg.properties.cache_dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> typing.Union[MetaOapg.properties.num_gpu_blocks_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> typing.Union[MetaOapg.properties.enable_prefix_caching, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "max_gpu_memory_utilization", "attention_backend", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "inference_framework", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], - model_name: typing.Union[MetaOapg.properties.model_name, str, ], - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], - name: typing.Union[MetaOapg.properties.name, str, ], - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], - labels: typing.Union[MetaOapg.properties.labels, dict, 
frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = 
schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, - max_gpu_memory_utilization: typing.Union[MetaOapg.properties.max_gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - attention_backend: typing.Union[MetaOapg.properties.attention_backend, None, str, schemas.Unset] = schemas.unset, - max_model_len: typing.Union[MetaOapg.properties.max_model_len, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_num_seqs: typing.Union[MetaOapg.properties.max_num_seqs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enforce_eager: typing.Union[MetaOapg.properties.enforce_eager, None, bool, schemas.Unset] = schemas.unset, - trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, - pipeline_parallel_size: typing.Union[MetaOapg.properties.pipeline_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - tensor_parallel_size: typing.Union[MetaOapg.properties.tensor_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, - disable_log_requests: typing.Union[MetaOapg.properties.disable_log_requests, 
None, bool, schemas.Unset] = schemas.unset, - chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, - tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, - enable_auto_tool_choice: typing.Union[MetaOapg.properties.enable_auto_tool_choice, None, bool, schemas.Unset] = schemas.unset, - load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, - config_format: typing.Union[MetaOapg.properties.config_format, None, str, schemas.Unset] = schemas.unset, - tokenizer_mode: typing.Union[MetaOapg.properties.tokenizer_mode, None, str, schemas.Unset] = schemas.unset, - limit_mm_per_prompt: typing.Union[MetaOapg.properties.limit_mm_per_prompt, None, str, schemas.Unset] = schemas.unset, - max_num_batched_tokens: typing.Union[MetaOapg.properties.max_num_batched_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - tokenizer: typing.Union[MetaOapg.properties.tokenizer, None, str, schemas.Unset] = schemas.unset, - dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = schemas.unset, - code_revision: typing.Union[MetaOapg.properties.code_revision, None, str, schemas.Unset] = schemas.unset, - rope_scaling: typing.Union[MetaOapg.properties.rope_scaling, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - tokenizer_revision: typing.Union[MetaOapg.properties.tokenizer_revision, None, str, schemas.Unset] = schemas.unset, - quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, - max_seq_len_to_capture: typing.Union[MetaOapg.properties.max_seq_len_to_capture, None, decimal.Decimal, int, schemas.Unset] = 
schemas.unset, - disable_sliding_window: typing.Union[MetaOapg.properties.disable_sliding_window, None, bool, schemas.Unset] = schemas.unset, - skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, - served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, - override_neuron_config: typing.Union[MetaOapg.properties.override_neuron_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - mm_processor_kwargs: typing.Union[MetaOapg.properties.mm_processor_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - block_size: typing.Union[MetaOapg.properties.block_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - gpu_memory_utilization: typing.Union[MetaOapg.properties.gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - swap_space: typing.Union[MetaOapg.properties.swap_space, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - cache_dtype: typing.Union[MetaOapg.properties.cache_dtype, None, str, schemas.Unset] = schemas.unset, - num_gpu_blocks_override: typing.Union[MetaOapg.properties.num_gpu_blocks_override, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enable_prefix_caching: typing.Union[MetaOapg.properties.enable_prefix_caching, None, bool, schemas.Unset] = schemas.unset, - inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CreateVLLMModelEndpointRequest': - return super().__new__( - cls, - *_args, - metadata=metadata, - model_name=model_name, - max_workers=max_workers, - min_workers=min_workers, - name=name, - 
per_worker=per_worker, - labels=labels, - quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - source=source, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - endpoint_type=endpoint_type, - max_gpu_memory_utilization=max_gpu_memory_utilization, - attention_backend=attention_backend, - max_model_len=max_model_len, - max_num_seqs=max_num_seqs, - enforce_eager=enforce_eager, - trust_remote_code=trust_remote_code, - pipeline_parallel_size=pipeline_parallel_size, - tensor_parallel_size=tensor_parallel_size, - quantization=quantization, - disable_log_requests=disable_log_requests, - chat_template=chat_template, - tool_call_parser=tool_call_parser, - enable_auto_tool_choice=enable_auto_tool_choice, - load_format=load_format, - config_format=config_format, - tokenizer_mode=tokenizer_mode, - limit_mm_per_prompt=limit_mm_per_prompt, - max_num_batched_tokens=max_num_batched_tokens, - tokenizer=tokenizer, - dtype=dtype, - seed=seed, - revision=revision, - code_revision=code_revision, - rope_scaling=rope_scaling, - tokenizer_revision=tokenizer_revision, - quantization_param_path=quantization_param_path, - max_seq_len_to_capture=max_seq_len_to_capture, - disable_sliding_window=disable_sliding_window, - skip_tokenizer_init=skip_tokenizer_init, - served_model_name=served_model_name, - override_neuron_config=override_neuron_config, - mm_processor_kwargs=mm_processor_kwargs, - block_size=block_size, - 
gpu_memory_utilization=gpu_memory_utilization, - swap_space=swap_space, - cache_dtype=cache_dtype, - num_gpu_blocks_override=num_gpu_blocks_override, - enable_prefix_caching=enable_prefix_caching, - inference_framework=inference_framework, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.model_endpoint_type import ModelEndpointType -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/custom_framework.py b/launch/api_client/model/custom_framework.py deleted file mode 100644 index 6af59b6d..00000000 --- a/launch/api_client/model/custom_framework.py +++ /dev/null @@ -1,124 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class CustomFramework( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for a custom framework specification. 
- """ - - - class MetaOapg: - required = { - "image_repository", - "framework_type", - "image_tag", - } - - class properties: - - - class framework_type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "custom_base_image": "CUSTOM_BASE_IMAGE", - } - - @schemas.classproperty - def CUSTOM_BASE_IMAGE(cls): - return cls("custom_base_image") - image_repository = schemas.StrSchema - image_tag = schemas.StrSchema - __annotations__ = { - "framework_type": framework_type, - "image_repository": image_repository, - "image_tag": image_tag, - } - - image_repository: MetaOapg.properties.image_repository - framework_type: MetaOapg.properties.framework_type - image_tag: MetaOapg.properties.image_tag - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["framework_type", "image_repository", "image_tag", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["framework_type", "image_repository", "image_tag", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - image_repository: typing.Union[MetaOapg.properties.image_repository, str, ], - framework_type: typing.Union[MetaOapg.properties.framework_type, str, ], - image_tag: typing.Union[MetaOapg.properties.image_tag, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'CustomFramework': - return super().__new__( - cls, - *_args, - image_repository=image_repository, - framework_type=framework_type, - image_tag=image_tag, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/delete_file_response.py b/launch/api_client/model/delete_file_response.py deleted file mode 100644 index eb5a0fab..00000000 --- a/launch/api_client/model/delete_file_response.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class DeleteFileResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for deleting a file. 
- """ - - - class MetaOapg: - required = { - "deleted", - } - - class properties: - deleted = schemas.BoolSchema - __annotations__ = { - "deleted": deleted, - } - - deleted: MetaOapg.properties.deleted - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - deleted: typing.Union[MetaOapg.properties.deleted, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'DeleteFileResponse': - return super().__new__( - cls, - *_args, - deleted=deleted, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/delete_llm_endpoint_response.py b/launch/api_client/model/delete_llm_endpoint_response.py deleted file mode 100644 index c3a2a8c5..00000000 --- a/launch/api_client/model/delete_llm_endpoint_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" 
- -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class DeleteLLMEndpointResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "deleted", - } - - class properties: - deleted = schemas.BoolSchema - __annotations__ = { - "deleted": deleted, - } - - deleted: MetaOapg.properties.deleted - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - deleted: typing.Union[MetaOapg.properties.deleted, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'DeleteLLMEndpointResponse': - return super().__new__( - cls, - *_args, - deleted=deleted, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/delete_model_endpoint_v1_response.py b/launch/api_client/model/delete_model_endpoint_v1_response.py deleted file mode 100644 index 1c9c6587..00000000 --- a/launch/api_client/model/delete_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class DeleteModelEndpointV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "deleted", - } - - class properties: - deleted = schemas.BoolSchema - __annotations__ = { - "deleted": deleted, - } - - deleted: MetaOapg.properties.deleted - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - deleted: typing.Union[MetaOapg.properties.deleted, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'DeleteModelEndpointV1Response': - return super().__new__( - cls, - *_args, - deleted=deleted, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/delete_trigger_v1_response.py b/launch/api_client/model/delete_trigger_v1_response.py deleted file mode 100644 index c313ec99..00000000 --- a/launch/api_client/model/delete_trigger_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech 
-""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class DeleteTriggerV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - success: typing.Union[MetaOapg.properties.success, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'DeleteTriggerV1Response': - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/docker_image_batch_job.py b/launch/api_client/model/docker_image_batch_job.py deleted file mode 100644 index 66d52246..00000000 --- a/launch/api_client/model/docker_image_batch_job.py +++ /dev/null @@ -1,273 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class DockerImageBatchJob( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job -created via the "supply a docker image for a k8s job" API. 
- """ - - - class MetaOapg: - required = { - "owner", - "created_at", - "id", - "created_by", - "status", - } - - class properties: - id = schemas.StrSchema - created_by = schemas.StrSchema - owner = schemas.StrSchema - created_at = schemas.DateTimeSchema - - @staticmethod - def status() -> typing.Type['BatchJobStatus']: - return BatchJobStatus - - - class completed_at( - schemas.DateTimeBase, - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - class MetaOapg: - format = 'date-time' - - - def __new__( - cls, - *_args: typing.Union[None, str, datetime, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'completed_at': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class annotations( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'annotations': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class override_job_max_runtime_s( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'override_job_max_runtime_s': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_workers( - 
schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "id": id, - "created_by": created_by, - "owner": owner, - "created_at": created_at, - "status": status, - "completed_at": completed_at, - "annotations": annotations, - "override_job_max_runtime_s": override_job_max_runtime_s, - "num_workers": num_workers, - } - - owner: MetaOapg.properties.owner - created_at: MetaOapg.properties.created_at - id: MetaOapg.properties.id - created_by: MetaOapg.properties.created_by - status: 'BatchJobStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["annotations"]) -> MetaOapg.properties.annotations: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["override_job_max_runtime_s"]) -> MetaOapg.properties.override_job_max_runtime_s: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_workers"]) -> MetaOapg.properties.num_workers: ... 
- - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "created_by", "owner", "created_at", "status", "completed_at", "annotations", "override_job_max_runtime_s", "num_workers", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["completed_at"]) -> typing.Union[MetaOapg.properties.completed_at, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["annotations"]) -> typing.Union[MetaOapg.properties.annotations, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["override_job_max_runtime_s"]) -> typing.Union[MetaOapg.properties.override_job_max_runtime_s, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_workers"]) -> typing.Union[MetaOapg.properties.num_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "created_by", "owner", "created_at", "status", "completed_at", "annotations", "override_job_max_runtime_s", "num_workers", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - owner: typing.Union[MetaOapg.properties.owner, str, ], - created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], - id: typing.Union[MetaOapg.properties.id, str, ], - created_by: typing.Union[MetaOapg.properties.created_by, str, ], - status: 'BatchJobStatus', - completed_at: typing.Union[MetaOapg.properties.completed_at, None, str, datetime, schemas.Unset] = schemas.unset, - annotations: typing.Union[MetaOapg.properties.annotations, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - override_job_max_runtime_s: typing.Union[MetaOapg.properties.override_job_max_runtime_s, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - num_workers: typing.Union[MetaOapg.properties.num_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'DockerImageBatchJob': - return super().__new__( - cls, - *_args, - owner=owner, - created_at=created_at, - id=id, - created_by=created_by, - status=status, - completed_at=completed_at, - annotations=annotations, - override_job_max_runtime_s=override_job_max_runtime_s, - num_workers=num_workers, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/docker_image_batch_job_bundle_v1_response.py b/launch/api_client/model/docker_image_batch_job_bundle_v1_response.py deleted file mode 100644 index bebc5aaf..00000000 --- 
a/launch/api_client/model/docker_image_batch_job_bundle_v1_response.py +++ /dev/null @@ -1,408 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class DockerImageBatchJobBundleV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "image_repository", - "name", - "created_at", - "id", - "image_tag", - "env", - "command", - } - - class properties: - id = schemas.StrSchema - name = schemas.StrSchema - created_at = schemas.DateTimeSchema - image_repository = schemas.StrSchema - image_tag = schemas.StrSchema - - - class command( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'command': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class env( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def 
get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'env': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class mount_location( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'mount_location': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class storage( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class gpu_type( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpu_type': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class public( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "id": id, - "name": name, - "created_at": created_at, - "image_repository": image_repository, - "image_tag": image_tag, - "command": command, - "env": env, - "mount_location": mount_location, - "cpus": cpus, - "memory": memory, - "storage": storage, - "gpus": gpus, - "gpu_type": gpu_type, - "public": public, - } - - image_repository: MetaOapg.properties.image_repository - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - id: MetaOapg.properties.id - image_tag: MetaOapg.properties.image_tag - env: MetaOapg.properties.env - command: MetaOapg.properties.command - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mount_location"]) -> MetaOapg.properties.mount_location: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> MetaOapg.properties.gpu_type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public"]) -> MetaOapg.properties.public: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "created_at", "image_repository", "image_tag", "command", "env", "mount_location", "cpus", "memory", "storage", "gpus", "gpu_type", "public", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["mount_location"]) -> typing.Union[MetaOapg.properties.mount_location, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union[MetaOapg.properties.gpu_type, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public"]) -> typing.Union[MetaOapg.properties.public, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "created_at", "image_repository", "image_tag", "command", "env", "mount_location", "cpus", "memory", "storage", "gpus", "gpu_type", "public", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - image_repository: typing.Union[MetaOapg.properties.image_repository, str, ], - name: typing.Union[MetaOapg.properties.name, str, ], - created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], - id: typing.Union[MetaOapg.properties.id, str, ], - image_tag: typing.Union[MetaOapg.properties.image_tag, str, ], - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, ], - command: typing.Union[MetaOapg.properties.command, list, tuple, ], - mount_location: typing.Union[MetaOapg.properties.mount_location, None, str, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, None, str, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, None, str, schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, None, str, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - gpu_type: typing.Union[MetaOapg.properties.gpu_type, None, str, schemas.Unset] = schemas.unset, - public: typing.Union[MetaOapg.properties.public, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'DockerImageBatchJobBundleV1Response': - return super().__new__( - cls, - *_args, - image_repository=image_repository, - name=name, - created_at=created_at, - id=id, - image_tag=image_tag, - env=env, - command=command, - 
mount_location=mount_location, - cpus=cpus, - memory=memory, - storage=storage, - gpus=gpus, - gpu_type=gpu_type, - public=public, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/endpoint_predict_v1_request.py b/launch/api_client/model/endpoint_predict_v1_request.py deleted file mode 100644 index 64c753c1..00000000 --- a/launch/api_client/model/endpoint_predict_v1_request.py +++ /dev/null @@ -1,219 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class EndpointPredictV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - args = schemas.AnyTypeSchema - - - class cloudpickle( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cloudpickle': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - return_pickled = schemas.BoolSchema - - - class destination_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'destination_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "url": url, - "args": args, - "cloudpickle": cloudpickle, - "callback_url": callback_url, - "callback_auth": callback_auth, - "return_pickled": return_pickled, - "destination_path": destination_path, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["args"]) -> MetaOapg.properties.args: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cloudpickle"]) -> MetaOapg.properties.cloudpickle: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_url"]) -> MetaOapg.properties.callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["return_pickled"]) -> MetaOapg.properties.return_pickled: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["destination_path"]) -> MetaOapg.properties.destination_path: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["url", "args", "cloudpickle", "callback_url", "callback_auth", "return_pickled", "destination_path", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["url"]) -> typing.Union[MetaOapg.properties.url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["args"]) -> typing.Union[MetaOapg.properties.args, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cloudpickle"]) -> typing.Union[MetaOapg.properties.cloudpickle, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["callback_url"]) -> typing.Union[MetaOapg.properties.callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["return_pickled"]) -> typing.Union[MetaOapg.properties.return_pickled, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["destination_path"]) -> typing.Union[MetaOapg.properties.destination_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["url", "args", "cloudpickle", "callback_url", "callback_auth", "return_pickled", "destination_path", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - url: typing.Union[MetaOapg.properties.url, None, str, schemas.Unset] = schemas.unset, - args: typing.Union[MetaOapg.properties.args, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - cloudpickle: typing.Union[MetaOapg.properties.cloudpickle, None, str, schemas.Unset] = schemas.unset, - callback_url: typing.Union[MetaOapg.properties.callback_url, None, str, schemas.Unset] = schemas.unset, - callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - return_pickled: typing.Union[MetaOapg.properties.return_pickled, bool, schemas.Unset] = schemas.unset, - destination_path: typing.Union[MetaOapg.properties.destination_path, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'EndpointPredictV1Request': - return super().__new__( - cls, - *_args, - url=url, - args=args, - cloudpickle=cloudpickle, - callback_url=callback_url, - 
callback_auth=callback_auth, - return_pickled=return_pickled, - destination_path=destination_path, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth diff --git a/launch/api_client/model/file.py b/launch/api_client/model/file.py deleted file mode 100644 index 5837ed26..00000000 --- a/launch/api_client/model/file.py +++ /dev/null @@ -1,155 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class File( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class filename( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'filename': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class file_data( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'file_data': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class file_id( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'file_id': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "filename": filename, - "file_data": file_data, - "file_id": file_id, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["file_data"]) -> MetaOapg.properties.file_data: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["file_id"]) -> MetaOapg.properties.file_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["filename", "file_data", "file_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["filename"]) -> typing.Union[MetaOapg.properties.filename, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["file_data"]) -> typing.Union[MetaOapg.properties.file_data, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["file_id"]) -> typing.Union[MetaOapg.properties.file_id, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["filename", "file_data", "file_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - filename: typing.Union[MetaOapg.properties.filename, None, str, schemas.Unset] = schemas.unset, - file_data: typing.Union[MetaOapg.properties.file_data, None, str, schemas.Unset] = schemas.unset, - file_id: typing.Union[MetaOapg.properties.file_id, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'File': - return super().__new__( - cls, - *_args, - filename=filename, - file_data=file_data, - file_id=file_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/filtered_chat_completion_v2_request.py b/launch/api_client/model/filtered_chat_completion_v2_request.py deleted file mode 100644 index d40f2fe0..00000000 --- a/launch/api_client/model/filtered_chat_completion_v2_request.py +++ /dev/null @@ -1,1805 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: 
F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class FilteredChatCompletionV2Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "messages", - } - - class properties: - - - class messages( - schemas.ListSchema - ): - - - class MetaOapg: - min_items = 1 - - @staticmethod - def items() -> typing.Type['ChatCompletionRequestMessage']: - return ChatCompletionRequestMessage - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessage'], typing.List['ChatCompletionRequestMessage']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'messages': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ChatCompletionRequestMessage': - return super().__getitem__(i) - - - class best_of( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'best_of': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_k( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = -1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_k': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_p': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class use_beam_search( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'use_beam_search': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class length_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'length_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class repetition_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'repetition_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class early_stopping( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'early_stopping': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class stop_token_ids( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.IntSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'stop_token_ids': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class include_stop_str_in_output( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'include_stop_str_in_output': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ignore_eos( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ignore_eos': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class skip_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class spaces_between_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'spaces_between_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class echo( - 
schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'echo': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class add_generation_prompt( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'add_generation_prompt': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class continue_final_message( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'continue_final_message': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class add_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'add_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class documents( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - - class items( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], 
- _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'items': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'documents': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template_kwargs( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'chat_template_kwargs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_json( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def 
__getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'guided_json': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_regex( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_regex': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_choice( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_choice': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_grammar( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_grammar': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_decoding_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - 
*_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_decoding_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_whitespace_pattern( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_whitespace_pattern': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class priority( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def metadata() -> typing.Type['Metadata']: - return Metadata - - - class temperature( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'temperature': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_p': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class user( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): 
- - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'user': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def service_tier() -> typing.Type['ServiceTier']: - return ServiceTier - - - class model( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def modalities() -> typing.Type['ResponseModalities']: - return ResponseModalities - - @staticmethod - def reasoning_effort() -> typing.Type['ReasoningEffort']: - return ReasoningEffort - - - class max_completion_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_completion_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class frequency_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = -2.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'frequency_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class presence_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = -2.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'presence_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def web_search_options() -> typing.Type['WebSearchOptions']: - return WebSearchOptions - - - class top_logprobs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 20 - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_logprobs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class response_format( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - ResponseFormatText, - ResponseFormatJsonSchema, - ResponseFormatJsonObject, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'response_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def audio() -> typing.Type['Audio2']: - return Audio2 - - - class store( - schemas.BoolBase, - 
schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'store': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class stream( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stream': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def stop() -> typing.Type['StopConfiguration']: - return StopConfiguration - - - class logit_bias( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.IntSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, decimal.Decimal, int, ], - ) -> 'logit_bias': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class logprobs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'logprobs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( 
- cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class n( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 128 - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'n': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def prediction() -> typing.Type['PredictionContent']: - return PredictionContent - - - class seed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = -9223372036854775616 - inclusive_minimum = 9223372036854775616 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'seed': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def stream_options() -> typing.Type['ChatCompletionStreamOptions']: - return ChatCompletionStreamOptions - - - class tools( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionTool']: - return ChatCompletionTool - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tools': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def tool_choice() -> typing.Type['ChatCompletionToolChoiceOption']: - return ChatCompletionToolChoiceOption - parallel_tool_calls = schemas.BoolSchema - - - class function_call( - 
schemas.ComposedSchema, - ): - - - class MetaOapg: - - - class any_of_0( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "none": "NONE", - "auto": "AUTO", - } - - @schemas.classproperty - def NONE(cls): - return cls("none") - - @schemas.classproperty - def AUTO(cls): - return cls("auto") - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - ChatCompletionFunctionCallOption, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'function_call': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class functions( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionFunctions']: - return ChatCompletionFunctions - max_items = 128 - min_items = 1 - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'functions': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "messages": messages, - "best_of": best_of, - "top_k": 
top_k, - "min_p": min_p, - "use_beam_search": use_beam_search, - "length_penalty": length_penalty, - "repetition_penalty": repetition_penalty, - "early_stopping": early_stopping, - "stop_token_ids": stop_token_ids, - "include_stop_str_in_output": include_stop_str_in_output, - "ignore_eos": ignore_eos, - "min_tokens": min_tokens, - "skip_special_tokens": skip_special_tokens, - "spaces_between_special_tokens": spaces_between_special_tokens, - "echo": echo, - "add_generation_prompt": add_generation_prompt, - "continue_final_message": continue_final_message, - "add_special_tokens": add_special_tokens, - "documents": documents, - "chat_template": chat_template, - "chat_template_kwargs": chat_template_kwargs, - "guided_json": guided_json, - "guided_regex": guided_regex, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "guided_decoding_backend": guided_decoding_backend, - "guided_whitespace_pattern": guided_whitespace_pattern, - "priority": priority, - "metadata": metadata, - "temperature": temperature, - "top_p": top_p, - "user": user, - "service_tier": service_tier, - "model": model, - "modalities": modalities, - "reasoning_effort": reasoning_effort, - "max_completion_tokens": max_completion_tokens, - "frequency_penalty": frequency_penalty, - "presence_penalty": presence_penalty, - "web_search_options": web_search_options, - "top_logprobs": top_logprobs, - "response_format": response_format, - "audio": audio, - "store": store, - "stream": stream, - "stop": stop, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_tokens": max_tokens, - "n": n, - "prediction": prediction, - "seed": seed, - "stream_options": stream_options, - "tools": tools, - "tool_choice": tool_choice, - "parallel_tool_calls": parallel_tool_calls, - "function_call": function_call, - "functions": functions, - } - - messages: MetaOapg.properties.messages - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["messages"]) -> MetaOapg.properties.messages: 
... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["best_of"]) -> MetaOapg.properties.best_of: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_p"]) -> MetaOapg.properties.min_p: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["use_beam_search"]) -> MetaOapg.properties.use_beam_search: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["length_penalty"]) -> MetaOapg.properties.length_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repetition_penalty"]) -> MetaOapg.properties.repetition_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["early_stopping"]) -> MetaOapg.properties.early_stopping: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_token_ids"]) -> MetaOapg.properties.stop_token_ids: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ignore_eos"]) -> MetaOapg.properties.ignore_eos: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_tokens"]) -> MetaOapg.properties.min_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> MetaOapg.properties.spaces_between_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["echo"]) -> MetaOapg.properties.echo: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["add_generation_prompt"]) -> MetaOapg.properties.add_generation_prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["continue_final_message"]) -> MetaOapg.properties.continue_final_message: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["add_special_tokens"]) -> MetaOapg.properties.add_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["documents"]) -> MetaOapg.properties.documents: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_kwargs"]) -> MetaOapg.properties.chat_template_kwargs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> MetaOapg.properties.guided_decoding_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> MetaOapg.properties.guided_whitespace_pattern: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> 'Metadata': ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["user"]) -> MetaOapg.properties.user: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["service_tier"]) -> 'ServiceTier': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["modalities"]) -> 'ResponseModalities': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["reasoning_effort"]) -> 'ReasoningEffort': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_completion_tokens"]) -> MetaOapg.properties.max_completion_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["web_search_options"]) -> 'WebSearchOptions': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["response_format"]) -> MetaOapg.properties.response_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["audio"]) -> 'Audio2': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["store"]) -> MetaOapg.properties.store: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream"]) -> MetaOapg.properties.stream: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop"]) -> 'StopConfiguration': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logit_bias"]) -> MetaOapg.properties.logit_bias: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> MetaOapg.properties.logprobs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_tokens"]) -> MetaOapg.properties.max_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["n"]) -> MetaOapg.properties.n: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prediction"]) -> 'PredictionContent': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream_options"]) -> 'ChatCompletionStreamOptions': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tools"]) -> MetaOapg.properties.tools: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_choice"]) -> 'ChatCompletionToolChoiceOption': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["parallel_tool_calls"]) -> MetaOapg.properties.parallel_tool_calls: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> MetaOapg.properties.function_call: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["functions"]) -> MetaOapg.properties.functions: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["messages", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "echo", "add_generation_prompt", "continue_final_message", "add_special_tokens", "documents", "chat_template", "chat_template_kwargs", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "priority", "metadata", "temperature", "top_p", "user", "service_tier", "model", "modalities", "reasoning_effort", "max_completion_tokens", "frequency_penalty", "presence_penalty", "web_search_options", "top_logprobs", "response_format", "audio", "store", "stream", "stop", "logit_bias", "logprobs", "max_tokens", "n", "prediction", "seed", "stream_options", "tools", "tool_choice", "parallel_tool_calls", "function_call", "functions", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["messages"]) -> MetaOapg.properties.messages: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["best_of"]) -> typing.Union[MetaOapg.properties.best_of, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_p"]) -> typing.Union[MetaOapg.properties.min_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["use_beam_search"]) -> typing.Union[MetaOapg.properties.use_beam_search, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["length_penalty"]) -> typing.Union[MetaOapg.properties.length_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repetition_penalty"]) -> typing.Union[MetaOapg.properties.repetition_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["early_stopping"]) -> typing.Union[MetaOapg.properties.early_stopping, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop_token_ids"]) -> typing.Union[MetaOapg.properties.stop_token_ids, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ignore_eos"]) -> typing.Union[MetaOapg.properties.ignore_eos, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_tokens"]) -> typing.Union[MetaOapg.properties.min_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> typing.Union[MetaOapg.properties.spaces_between_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["echo"]) -> typing.Union[MetaOapg.properties.echo, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["add_generation_prompt"]) -> typing.Union[MetaOapg.properties.add_generation_prompt, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["continue_final_message"]) -> typing.Union[MetaOapg.properties.continue_final_message, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["add_special_tokens"]) -> typing.Union[MetaOapg.properties.add_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["documents"]) -> typing.Union[MetaOapg.properties.documents, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_kwargs"]) -> typing.Union[MetaOapg.properties.chat_template_kwargs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> typing.Union[MetaOapg.properties.guided_decoding_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.guided_whitespace_pattern, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union['Metadata', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> typing.Union[MetaOapg.properties.temperature, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["user"]) -> typing.Union[MetaOapg.properties.user, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["service_tier"]) -> typing.Union['ServiceTier', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> typing.Union[MetaOapg.properties.model, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["modalities"]) -> typing.Union['ResponseModalities', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["reasoning_effort"]) -> typing.Union['ReasoningEffort', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_completion_tokens"]) -> typing.Union[MetaOapg.properties.max_completion_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["web_search_options"]) -> typing.Union['WebSearchOptions', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_logprobs"]) -> typing.Union[MetaOapg.properties.top_logprobs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["response_format"]) -> typing.Union[MetaOapg.properties.response_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["audio"]) -> typing.Union['Audio2', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["store"]) -> typing.Union[MetaOapg.properties.store, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream"]) -> typing.Union[MetaOapg.properties.stream, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop"]) -> typing.Union['StopConfiguration', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logit_bias"]) -> typing.Union[MetaOapg.properties.logit_bias, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union[MetaOapg.properties.logprobs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_tokens"]) -> typing.Union[MetaOapg.properties.max_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["n"]) -> typing.Union[MetaOapg.properties.n, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prediction"]) -> typing.Union['PredictionContent', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream_options"]) -> typing.Union['ChatCompletionStreamOptions', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tools"]) -> typing.Union[MetaOapg.properties.tools, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_choice"]) -> typing.Union['ChatCompletionToolChoiceOption', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["parallel_tool_calls"]) -> typing.Union[MetaOapg.properties.parallel_tool_calls, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union[MetaOapg.properties.function_call, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["functions"]) -> typing.Union[MetaOapg.properties.functions, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["messages", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "echo", "add_generation_prompt", "continue_final_message", "add_special_tokens", "documents", "chat_template", "chat_template_kwargs", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "priority", "metadata", "temperature", "top_p", "user", "service_tier", "model", "modalities", "reasoning_effort", "max_completion_tokens", "frequency_penalty", "presence_penalty", "web_search_options", "top_logprobs", "response_format", "audio", "store", "stream", "stop", "logit_bias", "logprobs", "max_tokens", "n", "prediction", "seed", "stream_options", "tools", "tool_choice", "parallel_tool_calls", "function_call", "functions", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - messages: typing.Union[MetaOapg.properties.messages, list, tuple, ], - best_of: typing.Union[MetaOapg.properties.best_of, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - min_p: typing.Union[MetaOapg.properties.min_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - use_beam_search: typing.Union[MetaOapg.properties.use_beam_search, None, bool, schemas.Unset] = schemas.unset, - length_penalty: typing.Union[MetaOapg.properties.length_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - repetition_penalty: typing.Union[MetaOapg.properties.repetition_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - early_stopping: 
typing.Union[MetaOapg.properties.early_stopping, None, bool, schemas.Unset] = schemas.unset, - stop_token_ids: typing.Union[MetaOapg.properties.stop_token_ids, list, tuple, None, schemas.Unset] = schemas.unset, - include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, - ignore_eos: typing.Union[MetaOapg.properties.ignore_eos, None, bool, schemas.Unset] = schemas.unset, - min_tokens: typing.Union[MetaOapg.properties.min_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, - spaces_between_special_tokens: typing.Union[MetaOapg.properties.spaces_between_special_tokens, None, bool, schemas.Unset] = schemas.unset, - echo: typing.Union[MetaOapg.properties.echo, None, bool, schemas.Unset] = schemas.unset, - add_generation_prompt: typing.Union[MetaOapg.properties.add_generation_prompt, None, bool, schemas.Unset] = schemas.unset, - continue_final_message: typing.Union[MetaOapg.properties.continue_final_message, None, bool, schemas.Unset] = schemas.unset, - add_special_tokens: typing.Union[MetaOapg.properties.add_special_tokens, None, bool, schemas.Unset] = schemas.unset, - documents: typing.Union[MetaOapg.properties.documents, list, tuple, None, schemas.Unset] = schemas.unset, - chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, - chat_template_kwargs: typing.Union[MetaOapg.properties.chat_template_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, - guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = 
schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, - guided_decoding_backend: typing.Union[MetaOapg.properties.guided_decoding_backend, None, str, schemas.Unset] = schemas.unset, - guided_whitespace_pattern: typing.Union[MetaOapg.properties.guided_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, - priority: typing.Union[MetaOapg.properties.priority, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - metadata: typing.Union['Metadata', schemas.Unset] = schemas.unset, - temperature: typing.Union[MetaOapg.properties.temperature, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - user: typing.Union[MetaOapg.properties.user, None, str, schemas.Unset] = schemas.unset, - service_tier: typing.Union['ServiceTier', schemas.Unset] = schemas.unset, - model: typing.Union[MetaOapg.properties.model, None, str, schemas.Unset] = schemas.unset, - modalities: typing.Union['ResponseModalities', schemas.Unset] = schemas.unset, - reasoning_effort: typing.Union['ReasoningEffort', schemas.Unset] = schemas.unset, - max_completion_tokens: typing.Union[MetaOapg.properties.max_completion_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - web_search_options: typing.Union['WebSearchOptions', schemas.Unset] = schemas.unset, - top_logprobs: typing.Union[MetaOapg.properties.top_logprobs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - response_format: typing.Union[MetaOapg.properties.response_format, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - audio: typing.Union['Audio2', schemas.Unset] = schemas.unset, - store: typing.Union[MetaOapg.properties.store, None, bool, schemas.Unset] = schemas.unset, - stream: typing.Union[MetaOapg.properties.stream, None, bool, schemas.Unset] = schemas.unset, - stop: typing.Union['StopConfiguration', schemas.Unset] = schemas.unset, - logit_bias: typing.Union[MetaOapg.properties.logit_bias, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - logprobs: typing.Union[MetaOapg.properties.logprobs, None, bool, schemas.Unset] = schemas.unset, - max_tokens: typing.Union[MetaOapg.properties.max_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - n: typing.Union[MetaOapg.properties.n, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - prediction: typing.Union['PredictionContent', schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - stream_options: typing.Union['ChatCompletionStreamOptions', schemas.Unset] = schemas.unset, - tools: typing.Union[MetaOapg.properties.tools, list, tuple, None, schemas.Unset] = schemas.unset, - tool_choice: typing.Union['ChatCompletionToolChoiceOption', schemas.Unset] = schemas.unset, - parallel_tool_calls: typing.Union[MetaOapg.properties.parallel_tool_calls, bool, schemas.Unset] = schemas.unset, - function_call: typing.Union[MetaOapg.properties.function_call, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - functions: typing.Union[MetaOapg.properties.functions, list, tuple, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'FilteredChatCompletionV2Request': - return super().__new__( - cls, - *_args, - messages=messages, - best_of=best_of, - top_k=top_k, - min_p=min_p, - use_beam_search=use_beam_search, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - early_stopping=early_stopping, - stop_token_ids=stop_token_ids, - include_stop_str_in_output=include_stop_str_in_output, - ignore_eos=ignore_eos, - min_tokens=min_tokens, - skip_special_tokens=skip_special_tokens, - spaces_between_special_tokens=spaces_between_special_tokens, - echo=echo, - add_generation_prompt=add_generation_prompt, - continue_final_message=continue_final_message, - add_special_tokens=add_special_tokens, - documents=documents, - chat_template=chat_template, - chat_template_kwargs=chat_template_kwargs, - guided_json=guided_json, - guided_regex=guided_regex, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - guided_decoding_backend=guided_decoding_backend, - guided_whitespace_pattern=guided_whitespace_pattern, - priority=priority, - metadata=metadata, - temperature=temperature, - top_p=top_p, - user=user, - service_tier=service_tier, - model=model, - modalities=modalities, - reasoning_effort=reasoning_effort, - max_completion_tokens=max_completion_tokens, - frequency_penalty=frequency_penalty, - presence_penalty=presence_penalty, - web_search_options=web_search_options, - top_logprobs=top_logprobs, - response_format=response_format, - audio=audio, - store=store, - stream=stream, - stop=stop, - logit_bias=logit_bias, - logprobs=logprobs, - max_tokens=max_tokens, - n=n, - prediction=prediction, - seed=seed, - stream_options=stream_options, - tools=tools, - tool_choice=tool_choice, - parallel_tool_calls=parallel_tool_calls, - function_call=function_call, - functions=functions, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.audio2 import Audio2 -from 
launch.api_client.model.chat_completion_function_call_option import ( - ChatCompletionFunctionCallOption, -) -from launch.api_client.model.chat_completion_functions import ( - ChatCompletionFunctions, -) -from launch.api_client.model.chat_completion_request_message import ( - ChatCompletionRequestMessage, -) -from launch.api_client.model.chat_completion_stream_options import ( - ChatCompletionStreamOptions, -) -from launch.api_client.model.chat_completion_tool import ChatCompletionTool -from launch.api_client.model.chat_completion_tool_choice_option import ( - ChatCompletionToolChoiceOption, -) -from launch.api_client.model.metadata import Metadata -from launch.api_client.model.prediction_content import PredictionContent -from launch.api_client.model.reasoning_effort import ReasoningEffort -from launch.api_client.model.response_format_json_object import ( - ResponseFormatJsonObject, -) -from launch.api_client.model.response_format_json_schema import ( - ResponseFormatJsonSchema, -) -from launch.api_client.model.response_format_text import ResponseFormatText -from launch.api_client.model.response_modalities import ResponseModalities -from launch.api_client.model.service_tier import ServiceTier -from launch.api_client.model.stop_configuration import StopConfiguration -from launch.api_client.model.web_search_options import WebSearchOptions diff --git a/launch/api_client/model/filtered_completion_v2_request.py b/launch/api_client/model/filtered_completion_v2_request.py deleted file mode 100644 index 3659e4cc..00000000 --- a/launch/api_client/model/filtered_completion_v2_request.py +++ /dev/null @@ -1,1295 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: 
F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class FilteredCompletionV2Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "prompt", - } - - class properties: - - - class prompt( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - - class any_of_1( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'any_of_1': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - Prompt, - Prompt1, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'prompt': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class best_of( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 20 - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'best_of': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_k( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = -1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_k': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_p': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class use_beam_search( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'use_beam_search': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class length_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'length_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class repetition_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'repetition_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class early_stopping( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'early_stopping': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class stop_token_ids( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.IntSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stop_token_ids': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class include_stop_str_in_output( - schemas.BoolBase, - schemas.NoneBase, - 
schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'include_stop_str_in_output': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ignore_eos( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ignore_eos': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class skip_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class spaces_between_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'spaces_between_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class add_special_tokens( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 
'add_special_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class response_format( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - ResponseFormatText, - ResponseFormatJsonSchema, - ResponseFormatJsonObject, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'response_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_json( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, 
date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'guided_json': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class guided_regex( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_regex': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_choice( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_choice': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_grammar( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_grammar': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_decoding_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_decoding_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class guided_whitespace_pattern( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'guided_whitespace_pattern': - return 
super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class model( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class echo( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'echo': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class frequency_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = -2.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'frequency_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class logit_bias( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.IntSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, decimal.Decimal, int, ], - ) -> 'logit_bias': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) 
- - - class logprobs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 5 - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'logprobs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class n( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 128 - inclusive_minimum = 1 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'n': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class presence_penalty( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = -2.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'presence_penalty': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class seed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'seed': - return super().__new__( - cls, 
- *_args, - _configuration=_configuration, - ) - - @staticmethod - def stop() -> typing.Type['StopConfiguration']: - return StopConfiguration - - - class stream( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stream': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def stream_options() -> typing.Type['ChatCompletionStreamOptions']: - return ChatCompletionStreamOptions - - - class suffix( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'suffix': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class temperature( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'temperature': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_p( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_maximum = 1.0 - inclusive_minimum = 0.0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_p': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class user( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'user': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "prompt": prompt, - "best_of": best_of, - "top_k": top_k, - "min_p": min_p, - "use_beam_search": use_beam_search, - "length_penalty": length_penalty, - "repetition_penalty": repetition_penalty, - "early_stopping": early_stopping, - "stop_token_ids": stop_token_ids, - "include_stop_str_in_output": include_stop_str_in_output, - "ignore_eos": ignore_eos, - "min_tokens": min_tokens, - "skip_special_tokens": skip_special_tokens, - "spaces_between_special_tokens": spaces_between_special_tokens, - "add_special_tokens": add_special_tokens, - "response_format": response_format, - "guided_json": guided_json, - "guided_regex": guided_regex, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "guided_decoding_backend": guided_decoding_backend, - "guided_whitespace_pattern": guided_whitespace_pattern, - "model": model, - "echo": echo, - "frequency_penalty": frequency_penalty, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_tokens": max_tokens, - "n": n, - "presence_penalty": presence_penalty, - "seed": seed, - "stop": stop, - "stream": stream, - "stream_options": stream_options, - "suffix": suffix, - "temperature": temperature, - "top_p": top_p, - "user": user, - } - - prompt: MetaOapg.properties.prompt - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["best_of"]) -> MetaOapg.properties.best_of: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_p"]) -> MetaOapg.properties.min_p: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["use_beam_search"]) -> MetaOapg.properties.use_beam_search: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["length_penalty"]) -> MetaOapg.properties.length_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repetition_penalty"]) -> MetaOapg.properties.repetition_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["early_stopping"]) -> MetaOapg.properties.early_stopping: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_token_ids"]) -> MetaOapg.properties.stop_token_ids: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ignore_eos"]) -> MetaOapg.properties.ignore_eos: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_tokens"]) -> MetaOapg.properties.min_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> MetaOapg.properties.spaces_between_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["add_special_tokens"]) -> MetaOapg.properties.add_special_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["response_format"]) -> MetaOapg.properties.response_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> MetaOapg.properties.guided_decoding_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> MetaOapg.properties.guided_whitespace_pattern: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["echo"]) -> MetaOapg.properties.echo: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logit_bias"]) -> MetaOapg.properties.logit_bias: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> MetaOapg.properties.logprobs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_tokens"]) -> MetaOapg.properties.max_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["n"]) -> MetaOapg.properties.n: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop"]) -> 'StopConfiguration': ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream"]) -> MetaOapg.properties.stream: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream_options"]) -> 'ChatCompletionStreamOptions': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["user"]) -> MetaOapg.properties.user: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["prompt", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "add_special_tokens", "response_format", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "model", "echo", "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "n", "presence_penalty", "seed", "stop", "stream", "stream_options", "suffix", "temperature", "top_p", "user", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["best_of"]) -> typing.Union[MetaOapg.properties.best_of, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_p"]) -> typing.Union[MetaOapg.properties.min_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["use_beam_search"]) -> typing.Union[MetaOapg.properties.use_beam_search, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["length_penalty"]) -> typing.Union[MetaOapg.properties.length_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repetition_penalty"]) -> typing.Union[MetaOapg.properties.repetition_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["early_stopping"]) -> typing.Union[MetaOapg.properties.early_stopping, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop_token_ids"]) -> typing.Union[MetaOapg.properties.stop_token_ids, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ignore_eos"]) -> typing.Union[MetaOapg.properties.ignore_eos, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_tokens"]) -> typing.Union[MetaOapg.properties.min_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> typing.Union[MetaOapg.properties.spaces_between_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["add_special_tokens"]) -> typing.Union[MetaOapg.properties.add_special_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["response_format"]) -> typing.Union[MetaOapg.properties.response_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> typing.Union[MetaOapg.properties.guided_decoding_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.guided_whitespace_pattern, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> typing.Union[MetaOapg.properties.model, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["echo"]) -> typing.Union[MetaOapg.properties.echo, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logit_bias"]) -> typing.Union[MetaOapg.properties.logit_bias, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union[MetaOapg.properties.logprobs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_tokens"]) -> typing.Union[MetaOapg.properties.max_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["n"]) -> typing.Union[MetaOapg.properties.n, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stop"]) -> typing.Union['StopConfiguration', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream"]) -> typing.Union[MetaOapg.properties.stream, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream_options"]) -> typing.Union['ChatCompletionStreamOptions', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["suffix"]) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> typing.Union[MetaOapg.properties.temperature, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["user"]) -> typing.Union[MetaOapg.properties.user, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["prompt", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "add_special_tokens", "response_format", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "model", "echo", "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "n", "presence_penalty", "seed", "stop", "stream", "stream_options", "suffix", "temperature", "top_p", "user", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - prompt: typing.Union[MetaOapg.properties.prompt, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - best_of: typing.Union[MetaOapg.properties.best_of, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - min_p: typing.Union[MetaOapg.properties.min_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - use_beam_search: typing.Union[MetaOapg.properties.use_beam_search, None, bool, schemas.Unset] = schemas.unset, - length_penalty: typing.Union[MetaOapg.properties.length_penalty, None, decimal.Decimal, int, float, schemas.Unset] 
= schemas.unset, - repetition_penalty: typing.Union[MetaOapg.properties.repetition_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - early_stopping: typing.Union[MetaOapg.properties.early_stopping, None, bool, schemas.Unset] = schemas.unset, - stop_token_ids: typing.Union[MetaOapg.properties.stop_token_ids, list, tuple, None, schemas.Unset] = schemas.unset, - include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, - ignore_eos: typing.Union[MetaOapg.properties.ignore_eos, None, bool, schemas.Unset] = schemas.unset, - min_tokens: typing.Union[MetaOapg.properties.min_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, - spaces_between_special_tokens: typing.Union[MetaOapg.properties.spaces_between_special_tokens, None, bool, schemas.Unset] = schemas.unset, - add_special_tokens: typing.Union[MetaOapg.properties.add_special_tokens, None, bool, schemas.Unset] = schemas.unset, - response_format: typing.Union[MetaOapg.properties.response_format, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, - guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, - guided_decoding_backend: typing.Union[MetaOapg.properties.guided_decoding_backend, None, str, schemas.Unset] = schemas.unset, - guided_whitespace_pattern: 
typing.Union[MetaOapg.properties.guided_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, - model: typing.Union[MetaOapg.properties.model, None, str, schemas.Unset] = schemas.unset, - echo: typing.Union[MetaOapg.properties.echo, None, bool, schemas.Unset] = schemas.unset, - frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - logit_bias: typing.Union[MetaOapg.properties.logit_bias, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - logprobs: typing.Union[MetaOapg.properties.logprobs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_tokens: typing.Union[MetaOapg.properties.max_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - n: typing.Union[MetaOapg.properties.n, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - stop: typing.Union['StopConfiguration', schemas.Unset] = schemas.unset, - stream: typing.Union[MetaOapg.properties.stream, None, bool, schemas.Unset] = schemas.unset, - stream_options: typing.Union['ChatCompletionStreamOptions', schemas.Unset] = schemas.unset, - suffix: typing.Union[MetaOapg.properties.suffix, None, str, schemas.Unset] = schemas.unset, - temperature: typing.Union[MetaOapg.properties.temperature, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - user: typing.Union[MetaOapg.properties.user, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, 
int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'FilteredCompletionV2Request': - return super().__new__( - cls, - *_args, - prompt=prompt, - best_of=best_of, - top_k=top_k, - min_p=min_p, - use_beam_search=use_beam_search, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - early_stopping=early_stopping, - stop_token_ids=stop_token_ids, - include_stop_str_in_output=include_stop_str_in_output, - ignore_eos=ignore_eos, - min_tokens=min_tokens, - skip_special_tokens=skip_special_tokens, - spaces_between_special_tokens=spaces_between_special_tokens, - add_special_tokens=add_special_tokens, - response_format=response_format, - guided_json=guided_json, - guided_regex=guided_regex, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - guided_decoding_backend=guided_decoding_backend, - guided_whitespace_pattern=guided_whitespace_pattern, - model=model, - echo=echo, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - logprobs=logprobs, - max_tokens=max_tokens, - n=n, - presence_penalty=presence_penalty, - seed=seed, - stop=stop, - stream=stream, - stream_options=stream_options, - suffix=suffix, - temperature=temperature, - top_p=top_p, - user=user, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_stream_options import ( - ChatCompletionStreamOptions, -) -from launch.api_client.model.prompt import Prompt -from launch.api_client.model.prompt1 import Prompt1 -from launch.api_client.model.response_format_json_object import ( - ResponseFormatJsonObject, -) -from launch.api_client.model.response_format_json_schema import ( - ResponseFormatJsonSchema, -) -from launch.api_client.model.response_format_text import ResponseFormatText -from launch.api_client.model.stop_configuration import StopConfiguration diff --git a/launch/api_client/model/function1.py b/launch/api_client/model/function1.py deleted file mode 100644 index 40d825cd..00000000 --- 
a/launch/api_client/model/function1.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Function1( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "name", - "arguments", - } - - class properties: - name = schemas.StrSchema - arguments = schemas.StrSchema - __annotations__ = { - "name": name, - "arguments": arguments, - } - - name: MetaOapg.properties.name - arguments: MetaOapg.properties.arguments - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "arguments", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "arguments", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - name: typing.Union[MetaOapg.properties.name, str, ], - arguments: typing.Union[MetaOapg.properties.arguments, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Function1': - return super().__new__( - cls, - *_args, - name=name, - arguments=arguments, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/function2.py b/launch/api_client/model/function2.py deleted file mode 100644 index 5047325c..00000000 --- a/launch/api_client/model/function2.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Function2( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class arguments( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'arguments': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "name": name, - "arguments": arguments, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "arguments", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["arguments"]) -> typing.Union[MetaOapg.properties.arguments, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "arguments", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, - arguments: typing.Union[MetaOapg.properties.arguments, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Function2': - return super().__new__( - cls, - *_args, - name=name, - arguments=arguments, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/function3.py b/launch/api_client/model/function3.py deleted file mode 100644 index 183409ff..00000000 --- a/launch/api_client/model/function3.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Function3( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "name", - } - - class properties: - name = schemas.StrSchema - __annotations__ = { - "name": name, - } - - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - name: typing.Union[MetaOapg.properties.name, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Function3': - return super().__new__( - cls, - *_args, - name=name, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/function_call.py b/launch/api_client/model/function_call.py deleted file mode 100644 index 8fe1ae52..00000000 --- a/launch/api_client/model/function_call.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: 
F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class FunctionCall( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "name", - "arguments", - } - - class properties: - arguments = schemas.StrSchema - name = schemas.StrSchema - __annotations__ = { - "arguments": arguments, - "name": name, - } - - name: MetaOapg.properties.name - arguments: MetaOapg.properties.arguments - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["arguments", "name", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["arguments", "name", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - name: typing.Union[MetaOapg.properties.name, str, ], - arguments: typing.Union[MetaOapg.properties.arguments, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'FunctionCall': - return super().__new__( - cls, - *_args, - name=name, - arguments=arguments, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/function_call2.py b/launch/api_client/model/function_call2.py deleted file mode 100644 index 4a13f166..00000000 --- a/launch/api_client/model/function_call2.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class FunctionCall2( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class arguments( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'arguments': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "arguments": arguments, - "name": name, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["arguments", "name", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["arguments"]) -> typing.Union[MetaOapg.properties.arguments, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["arguments", "name", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - arguments: typing.Union[MetaOapg.properties.arguments, None, str, schemas.Unset] = schemas.unset, - name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'FunctionCall2': - return super().__new__( - cls, - *_args, - arguments=arguments, - name=name, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/function_object.py b/launch/api_client/model/function_object.py deleted file mode 100644 index 631de02b..00000000 --- a/launch/api_client/model/function_object.py +++ /dev/null @@ -1,156 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class FunctionObject( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "name", - } - - class properties: - name = schemas.StrSchema - - - class description( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'description': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def parameters() -> typing.Type['FunctionParameters']: - return FunctionParameters - - - class strict( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'strict': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "name": name, - "description": description, - "parameters": parameters, - "strict": strict, - } - - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["parameters"]) -> 'FunctionParameters': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["strict"]) -> MetaOapg.properties.strict: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "description", "parameters", "strict", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["parameters"]) -> typing.Union['FunctionParameters', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["strict"]) -> typing.Union[MetaOapg.properties.strict, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "description", "parameters", "strict", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - name: typing.Union[MetaOapg.properties.name, str, ], - description: typing.Union[MetaOapg.properties.description, None, str, schemas.Unset] = schemas.unset, - parameters: typing.Union['FunctionParameters', schemas.Unset] = schemas.unset, - strict: typing.Union[MetaOapg.properties.strict, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'FunctionObject': - return super().__new__( - cls, - *_args, - name=name, - description=description, - parameters=parameters, - strict=strict, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.function_parameters import FunctionParameters diff --git a/launch/api_client/model/function_parameters.py b/launch/api_client/model/function_parameters.py deleted file mode 100644 index 17562468..00000000 --- a/launch/api_client/model/function_parameters.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by 
Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class FunctionParameters( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'FunctionParameters': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/get_async_task_v1_response.py b/launch/api_client/model/get_async_task_v1_response.py deleted file mode 100644 index 0bfed678..00000000 --- a/launch/api_client/model/get_async_task_v1_response.py +++ /dev/null @@ -1,168 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the 
OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetAsyncTaskV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "task_id", - "status", - } - - class properties: - task_id = schemas.StrSchema - - @staticmethod - def status() -> typing.Type['TaskStatus']: - return TaskStatus - result = schemas.AnyTypeSchema - - - class traceback( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'traceback': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class status_code( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'status_code': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "task_id": task_id, - "status": status, - "result": result, - "traceback": traceback, - "status_code": status_code, - } - - task_id: MetaOapg.properties.task_id - status: 'TaskStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'TaskStatus': ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["traceback"]) -> MetaOapg.properties.traceback: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["task_id", "status", "result", "traceback", "status_code", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'TaskStatus': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["result"]) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["traceback"]) -> typing.Union[MetaOapg.properties.traceback, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status_code"]) -> typing.Union[MetaOapg.properties.status_code, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["task_id", "status", "result", "traceback", "status_code", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - task_id: typing.Union[MetaOapg.properties.task_id, str, ], - status: 'TaskStatus', - result: typing.Union[MetaOapg.properties.result, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - traceback: typing.Union[MetaOapg.properties.traceback, None, str, schemas.Unset] = schemas.unset, - status_code: typing.Union[MetaOapg.properties.status_code, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetAsyncTaskV1Response': - return super().__new__( - cls, - *_args, - task_id=task_id, - status=status, - result=result, - traceback=traceback, - status_code=status_code, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.task_status import TaskStatus diff --git a/launch/api_client/model/get_batch_completion_v2_response.py b/launch/api_client/model/get_batch_completion_v2_response.py deleted file mode 100644 index 3a136fe3..00000000 --- a/launch/api_client/model/get_batch_completion_v2_response.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 
-import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetBatchCompletionV2Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "job", - } - - class properties: - - @staticmethod - def job() -> typing.Type['BatchCompletionsJob']: - return BatchCompletionsJob - __annotations__ = { - "job": job, - } - - job: 'BatchCompletionsJob' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job"]) -> 'BatchCompletionsJob': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["job", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job"]) -> 'BatchCompletionsJob': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - job: 'BatchCompletionsJob', - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetBatchCompletionV2Response': - return super().__new__( - cls, - *_args, - job=job, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.batch_completions_job import BatchCompletionsJob diff --git a/launch/api_client/model/get_batch_job_v1_response.py b/launch/api_client/model/get_batch_job_v1_response.py deleted file mode 100644 index 667120b9..00000000 --- a/launch/api_client/model/get_batch_job_v1_response.py +++ /dev/null @@ -1,187 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetBatchJobV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "duration", - "status", - } - - class properties: - - @staticmethod - def status() -> typing.Type['BatchJobStatus']: - return BatchJobStatus - duration = schemas.StrSchema - - - class result( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'result': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_tasks_pending( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_tasks_pending': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_tasks_completed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_tasks_completed': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "status": status, - "duration": duration, - "result": result, - "num_tasks_pending": num_tasks_pending, - "num_tasks_completed": num_tasks_completed, - } - - duration: MetaOapg.properties.duration - status: 'BatchJobStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["duration"]) -> MetaOapg.properties.duration: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_tasks_pending"]) -> MetaOapg.properties.num_tasks_pending: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_tasks_completed"]) -> MetaOapg.properties.num_tasks_completed: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["status", "duration", "result", "num_tasks_pending", "num_tasks_completed", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["duration"]) -> MetaOapg.properties.duration: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["result"]) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_tasks_pending"]) -> typing.Union[MetaOapg.properties.num_tasks_pending, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_tasks_completed"]) -> typing.Union[MetaOapg.properties.num_tasks_completed, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["status", "duration", "result", "num_tasks_pending", "num_tasks_completed", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - duration: typing.Union[MetaOapg.properties.duration, str, ], - status: 'BatchJobStatus', - result: typing.Union[MetaOapg.properties.result, None, str, schemas.Unset] = schemas.unset, - num_tasks_pending: typing.Union[MetaOapg.properties.num_tasks_pending, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - num_tasks_completed: typing.Union[MetaOapg.properties.num_tasks_completed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetBatchJobV1Response': - return super().__new__( - cls, - *_args, - duration=duration, - status=status, - result=result, - num_tasks_pending=num_tasks_pending, - num_tasks_completed=num_tasks_completed, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_docker_image_batch_job_v1_response.py b/launch/api_client/model/get_docker_image_batch_job_v1_response.py deleted file mode 100644 index 0015a5a8..00000000 --- a/launch/api_client/model/get_docker_image_batch_job_v1_response.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import 
uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetDockerImageBatchJobV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "status", - } - - class properties: - - @staticmethod - def status() -> typing.Type['BatchJobStatus']: - return BatchJobStatus - __annotations__ = { - "status": status, - } - - status: 'BatchJobStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["status", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["status", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - status: 'BatchJobStatus', - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetDockerImageBatchJobV1Response': - return super().__new__( - cls, - *_args, - status=status, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_file_content_response.py b/launch/api_client/model/get_file_content_response.py deleted file mode 100644 index 6e770f90..00000000 --- a/launch/api_client/model/get_file_content_response.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetFileContentResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for retrieving a file's content. 
- """ - - - class MetaOapg: - required = { - "id", - "content", - } - - class properties: - id = schemas.StrSchema - content = schemas.StrSchema - __annotations__ = { - "id": id, - "content": content, - } - - id: MetaOapg.properties.id - content: MetaOapg.properties.content - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "content", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "content", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - id: typing.Union[MetaOapg.properties.id, str, ], - content: typing.Union[MetaOapg.properties.content, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetFileContentResponse': - return super().__new__( - cls, - *_args, - id=id, - content=content, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/get_file_response.py b/launch/api_client/model/get_file_response.py deleted file mode 100644 index 476e738e..00000000 --- a/launch/api_client/model/get_file_response.py +++ /dev/null @@ -1,109 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetFileResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for retrieving a file. 
- """ - - - class MetaOapg: - required = { - "filename", - "size", - "id", - } - - class properties: - id = schemas.StrSchema - filename = schemas.StrSchema - size = schemas.IntSchema - __annotations__ = { - "id": id, - "filename": filename, - "size": size, - } - - filename: MetaOapg.properties.filename - size: MetaOapg.properties.size - id: MetaOapg.properties.id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["size"]) -> MetaOapg.properties.size: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "filename", "size", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["size"]) -> MetaOapg.properties.size: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "filename", "size", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - filename: typing.Union[MetaOapg.properties.filename, str, ], - size: typing.Union[MetaOapg.properties.size, decimal.Decimal, int, ], - id: typing.Union[MetaOapg.properties.id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetFileResponse': - return super().__new__( - cls, - *_args, - filename=filename, - size=size, - id=id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/get_fine_tune_events_response.py b/launch/api_client/model/get_fine_tune_events_response.py deleted file mode 100644 index d2b50d5b..00000000 --- a/launch/api_client/model/get_fine_tune_events_response.py +++ /dev/null @@ -1,110 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetFineTuneEventsResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "events", - } - - class properties: - - - class events( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['LLMFineTuneEvent']: - return LLMFineTuneEvent - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['LLMFineTuneEvent'], typing.List['LLMFineTuneEvent']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'events': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'LLMFineTuneEvent': - return super().__getitem__(i) - __annotations__ = { - "events": events, - } - - events: MetaOapg.properties.events - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["events"]) -> MetaOapg.properties.events: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["events", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["events"]) -> MetaOapg.properties.events: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["events", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - events: typing.Union[MetaOapg.properties.events, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetFineTuneEventsResponse': - return super().__new__( - cls, - *_args, - events=events, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.llm_fine_tune_event import LLMFineTuneEvent diff --git a/launch/api_client/model/get_fine_tune_job_response.py b/launch/api_client/model/get_fine_tune_job_response.py deleted file mode 100644 index e7cbae92..00000000 --- a/launch/api_client/model/get_fine_tune_job_response.py +++ /dev/null @@ -1,142 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetFineTuneResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "fine_tune_id", - "status", - } - - class properties: - fine_tune_id = schemas.StrSchema - - @staticmethod - def status() -> typing.Type["BatchJobStatus"]: - return BatchJobStatus - - __annotations__ = { - "fine_tune_id": fine_tune_id, - "status": status, - } - - fine_tune_id: MetaOapg.properties.fine_tune_id - status: "BatchJobStatus" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["fine_tune_id"]) -> MetaOapg.properties.fine_tune_id: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "fine_tune_id", - "status", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["fine_tune_id"]) -> MetaOapg.properties.fine_tune_id: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "fine_tune_id", - "status", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - fine_tune_id: typing.Union[ - MetaOapg.properties.fine_tune_id, - str, - ], - status: "BatchJobStatus", - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFineTuneResponse": - return super().__new__( - cls, - *_args, - fine_tune_id=fine_tune_id, - status=status, - _configuration=_configuration, - **kwargs, - ) - - -from launch.api_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_fine_tune_response.py b/launch/api_client/model/get_fine_tune_response.py deleted file mode 100644 index d8e32316..00000000 --- a/launch/api_client/model/get_fine_tune_response.py +++ /dev/null @@ -1,129 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetFineTuneResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "id", - "status", - } - - class properties: - id = schemas.StrSchema - - @staticmethod - def status() -> typing.Type['BatchJobStatus']: - return BatchJobStatus - - - class fine_tuned_model( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'fine_tuned_model': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "id": id, - "status": status, - "fine_tuned_model": fine_tuned_model, - } - - id: MetaOapg.properties.id - status: 'BatchJobStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["fine_tuned_model"]) -> MetaOapg.properties.fine_tuned_model: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "status", "fine_tuned_model", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["fine_tuned_model"]) -> typing.Union[MetaOapg.properties.fine_tuned_model, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "status", "fine_tuned_model", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - id: typing.Union[MetaOapg.properties.id, str, ], - status: 'BatchJobStatus', - fine_tuned_model: typing.Union[MetaOapg.properties.fine_tuned_model, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetFineTuneResponse': - return super().__new__( - cls, - *_args, - id=id, - status=status, - fine_tuned_model=fine_tuned_model, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_llm_model_endpoint_v1_response.py b/launch/api_client/model/get_llm_model_endpoint_v1_response.py deleted file mode 100644 index ee0c86e3..00000000 --- a/launch/api_client/model/get_llm_model_endpoint_v1_response.py +++ /dev/null @@ -1,304 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetLLMModelEndpointV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. 
- Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "inference_framework", - "model_name", - "name", - "id", - "source", - "status", - } - - class properties: - id = schemas.StrSchema - name = schemas.StrSchema - model_name = schemas.StrSchema - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - - @staticmethod - def status() -> typing.Type['ModelEndpointStatus']: - return ModelEndpointStatus - - @staticmethod - def inference_framework() -> typing.Type['LLMInferenceFramework']: - return LLMInferenceFramework - - - class inference_framework_image_tag( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'inference_framework_image_tag': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_shards( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_shards': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def spec() -> typing.Type['GetModelEndpointV1Response']: - return GetModelEndpointV1Response - __annotations__ = { - "id": id, - "name": name, - "model_name": model_name, - "source": source, - "status": status, - "inference_framework": inference_framework, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "chat_template_override": chat_template_override, - "spec": spec, - } - - inference_framework: 'LLMInferenceFramework' - model_name: MetaOapg.properties.model_name - name: MetaOapg.properties.name - id: MetaOapg.properties.id - source: 'LLMSource' - status: 'ModelEndpointStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'ModelEndpointStatus': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> 'LLMInferenceFramework': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["spec"]) -> 'GetModelEndpointV1Response': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "model_name", "source", "status", "inference_framework", "inference_framework_image_tag", "num_shards", "quantize", "checkpoint_path", "chat_template_override", "spec", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'ModelEndpointStatus': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> 'LLMInferenceFramework': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["spec"]) -> typing.Union['GetModelEndpointV1Response', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "model_name", "source", "status", "inference_framework", "inference_framework_image_tag", "num_shards", "quantize", "checkpoint_path", "chat_template_override", "spec", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - inference_framework: 'LLMInferenceFramework', - model_name: typing.Union[MetaOapg.properties.model_name, str, ], - name: typing.Union[MetaOapg.properties.name, str, ], - id: typing.Union[MetaOapg.properties.id, str, ], - source: 'LLMSource', - status: 'ModelEndpointStatus', - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: 
typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - spec: typing.Union['GetModelEndpointV1Response', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetLLMModelEndpointV1Response': - return super().__new__( - cls, - *_args, - inference_framework=inference_framework, - model_name=model_name, - name=name, - id=id, - source=source, - status=status, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - quantize=quantize, - checkpoint_path=checkpoint_path, - chat_template_override=chat_template_override, - spec=spec, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) -from launch.api_client.model.llm_inference_framework import ( - LLMInferenceFramework, -) -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.model_endpoint_status import ModelEndpointStatus -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/get_model_endpoint_v1_response.py b/launch/api_client/model/get_model_endpoint_v1_response.py deleted file mode 100644 index 8d2012b2..00000000 --- a/launch/api_client/model/get_model_endpoint_v1_response.py +++ /dev/null @@ -1,525 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # 
noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetModelEndpointV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "endpoint_type", - "last_updated_at", - "destination", - "name", - "created_at", - "bundle_name", - "id", - "created_by", - "status", - } - - class properties: - id = schemas.StrSchema - name = schemas.StrSchema - - @staticmethod - def endpoint_type() -> typing.Type['ModelEndpointType']: - return ModelEndpointType - destination = schemas.StrSchema - bundle_name = schemas.StrSchema - - @staticmethod - def status() -> typing.Type['ModelEndpointStatus']: - return ModelEndpointStatus - created_by = schemas.StrSchema - created_at = schemas.DateTimeSchema - last_updated_at = schemas.DateTimeSchema - - - class deployment_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'deployment_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, 
frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class labels( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - 
return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class aws_role( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'aws_role': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class results_s3_bucket( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'results_s3_bucket': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def deployment_state() -> typing.Type['ModelEndpointDeploymentState']: - return ModelEndpointDeploymentState - - @staticmethod - def resource_state() -> typing.Type['ModelEndpointResourceState']: - return ModelEndpointResourceState - - - class num_queued_items( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_queued_items': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "id": id, - "name": name, - "endpoint_type": endpoint_type, - "destination": destination, - "bundle_name": bundle_name, - "status": status, - "created_by": created_by, - "created_at": created_at, - "last_updated_at": 
last_updated_at, - "deployment_name": deployment_name, - "metadata": metadata, - "post_inference_hooks": post_inference_hooks, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "labels": labels, - "aws_role": aws_role, - "results_s3_bucket": results_s3_bucket, - "deployment_state": deployment_state, - "resource_state": resource_state, - "num_queued_items": num_queued_items, - "public_inference": public_inference, - } - - endpoint_type: 'ModelEndpointType' - last_updated_at: MetaOapg.properties.last_updated_at - destination: MetaOapg.properties.destination - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - bundle_name: MetaOapg.properties.bundle_name - id: MetaOapg.properties.id - created_by: MetaOapg.properties.created_by - status: 'ModelEndpointStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["destination"]) -> MetaOapg.properties.destination: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["bundle_name"]) -> MetaOapg.properties.bundle_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'ModelEndpointStatus': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["last_updated_at"]) -> MetaOapg.properties.last_updated_at: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deployment_name"]) -> MetaOapg.properties.deployment_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["aws_role"]) -> MetaOapg.properties.aws_role: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["results_s3_bucket"]) -> MetaOapg.properties.results_s3_bucket: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deployment_state"]) -> 'ModelEndpointDeploymentState': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["resource_state"]) -> 'ModelEndpointResourceState': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_queued_items"]) -> MetaOapg.properties.num_queued_items: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "endpoint_type", "destination", "bundle_name", "status", "created_by", "created_at", "last_updated_at", "deployment_name", "metadata", "post_inference_hooks", "default_callback_url", "default_callback_auth", "labels", "aws_role", "results_s3_bucket", "deployment_state", "resource_state", "num_queued_items", "public_inference", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["destination"]) -> MetaOapg.properties.destination: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["bundle_name"]) -> MetaOapg.properties.bundle_name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'ModelEndpointStatus': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["last_updated_at"]) -> MetaOapg.properties.last_updated_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deployment_name"]) -> typing.Union[MetaOapg.properties.deployment_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["aws_role"]) -> typing.Union[MetaOapg.properties.aws_role, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["results_s3_bucket"]) -> typing.Union[MetaOapg.properties.results_s3_bucket, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deployment_state"]) -> typing.Union['ModelEndpointDeploymentState', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["resource_state"]) -> typing.Union['ModelEndpointResourceState', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_queued_items"]) -> typing.Union[MetaOapg.properties.num_queued_items, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "endpoint_type", "destination", "bundle_name", "status", "created_by", "created_at", "last_updated_at", "deployment_name", "metadata", "post_inference_hooks", "default_callback_url", "default_callback_auth", "labels", "aws_role", "results_s3_bucket", "deployment_state", "resource_state", "num_queued_items", "public_inference", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - endpoint_type: 'ModelEndpointType', - last_updated_at: typing.Union[MetaOapg.properties.last_updated_at, str, datetime, ], - destination: typing.Union[MetaOapg.properties.destination, str, ], - name: typing.Union[MetaOapg.properties.name, str, ], - created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], - bundle_name: typing.Union[MetaOapg.properties.bundle_name, str, ], - id: typing.Union[MetaOapg.properties.id, str, ], - created_by: typing.Union[MetaOapg.properties.created_by, str, ], - status: 'ModelEndpointStatus', - deployment_name: typing.Union[MetaOapg.properties.deployment_name, None, str, schemas.Unset] = schemas.unset, - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - aws_role: typing.Union[MetaOapg.properties.aws_role, None, str, schemas.Unset] = schemas.unset, - results_s3_bucket: typing.Union[MetaOapg.properties.results_s3_bucket, None, str, schemas.Unset] = schemas.unset, - 
deployment_state: typing.Union['ModelEndpointDeploymentState', schemas.Unset] = schemas.unset, - resource_state: typing.Union['ModelEndpointResourceState', schemas.Unset] = schemas.unset, - num_queued_items: typing.Union[MetaOapg.properties.num_queued_items, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetModelEndpointV1Response': - return super().__new__( - cls, - *_args, - endpoint_type=endpoint_type, - last_updated_at=last_updated_at, - destination=destination, - name=name, - created_at=created_at, - bundle_name=bundle_name, - id=id, - created_by=created_by, - status=status, - deployment_name=deployment_name, - metadata=metadata, - post_inference_hooks=post_inference_hooks, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - labels=labels, - aws_role=aws_role, - results_s3_bucket=results_s3_bucket, - deployment_state=deployment_state, - resource_state=resource_state, - num_queued_items=num_queued_items, - public_inference=public_inference, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.model_endpoint_deployment_state import ( - ModelEndpointDeploymentState, -) -from launch.api_client.model.model_endpoint_resource_state import ( - ModelEndpointResourceState, -) -from launch.api_client.model.model_endpoint_status import ModelEndpointStatus -from launch.api_client.model.model_endpoint_type import ModelEndpointType diff --git a/launch/api_client/model/get_trigger_v1_response.py b/launch/api_client/model/get_trigger_v1_response.py deleted file mode 100644 index 
9905aeaf..00000000 --- a/launch/api_client/model/get_trigger_v1_response.py +++ /dev/null @@ -1,239 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GetTriggerV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "owner", - "cron_schedule", - "docker_image_batch_job_bundle_id", - "name", - "created_at", - "id", - "created_by", - } - - class properties: - id = schemas.StrSchema - name = schemas.StrSchema - owner = schemas.StrSchema - created_by = schemas.StrSchema - created_at = schemas.DateTimeSchema - cron_schedule = schemas.StrSchema - docker_image_batch_job_bundle_id = schemas.StrSchema - - - class default_job_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: 
typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'default_job_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_job_metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'default_job_metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "id": id, - "name": name, - "owner": owner, - "created_by": created_by, - "created_at": created_at, - "cron_schedule": cron_schedule, - "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, - "default_job_config": default_job_config, - "default_job_metadata": default_job_metadata, - } - - owner: MetaOapg.properties.owner - cron_schedule: MetaOapg.properties.cron_schedule - docker_image_batch_job_bundle_id: MetaOapg.properties.docker_image_batch_job_bundle_id - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - id: MetaOapg.properties.id - created_by: MetaOapg.properties.created_by - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_job_config"]) -> MetaOapg.properties.default_job_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_job_metadata"]) -> MetaOapg.properties.default_job_metadata: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "owner", "created_by", "created_at", "cron_schedule", "docker_image_batch_job_bundle_id", "default_job_config", "default_job_metadata", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_job_config"]) -> typing.Union[MetaOapg.properties.default_job_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_job_metadata"]) -> typing.Union[MetaOapg.properties.default_job_metadata, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "owner", "created_by", "created_at", "cron_schedule", "docker_image_batch_job_bundle_id", "default_job_config", "default_job_metadata", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - owner: typing.Union[MetaOapg.properties.owner, str, ], - cron_schedule: typing.Union[MetaOapg.properties.cron_schedule, str, ], - docker_image_batch_job_bundle_id: typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, str, ], - name: typing.Union[MetaOapg.properties.name, str, ], - created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], - id: typing.Union[MetaOapg.properties.id, str, ], - created_by: typing.Union[MetaOapg.properties.created_by, str, ], - default_job_config: typing.Union[MetaOapg.properties.default_job_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_job_metadata: typing.Union[MetaOapg.properties.default_job_metadata, dict, 
frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'GetTriggerV1Response': - return super().__new__( - cls, - *_args, - owner=owner, - cron_schedule=cron_schedule, - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - name=name, - created_at=created_at, - id=id, - created_by=created_by, - default_job_config=default_job_config, - default_job_metadata=default_job_metadata, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/gpu_type.py b/launch/api_client/model/gpu_type.py deleted file mode 100644 index 599f505f..00000000 --- a/launch/api_client/model/gpu_type.py +++ /dev/null @@ -1,76 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class GpuType( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Lists allowed GPU types for Launch. 
- """ - - - class MetaOapg: - enum_value_to_name = { - "nvidia-tesla-t4": "TESLAT4", - "nvidia-ampere-a10": "AMPEREA10", - "nvidia-ampere-a100": "AMPEREA100", - "nvidia-ampere-a100e": "AMPEREA100E", - "nvidia-hopper-h100": "HOPPERH100", - "nvidia-hopper-h100-1g20gb": "HOPPERH1001G20GB", - "nvidia-hopper-h100-3g40gb": "HOPPERH1003G40GB", - } - - @schemas.classproperty - def TESLAT4(cls): - return cls("nvidia-tesla-t4") - - @schemas.classproperty - def AMPEREA10(cls): - return cls("nvidia-ampere-a10") - - @schemas.classproperty - def AMPEREA100(cls): - return cls("nvidia-ampere-a100") - - @schemas.classproperty - def AMPEREA100E(cls): - return cls("nvidia-ampere-a100e") - - @schemas.classproperty - def HOPPERH100(cls): - return cls("nvidia-hopper-h100") - - @schemas.classproperty - def HOPPERH1001G20GB(cls): - return cls("nvidia-hopper-h100-1g20gb") - - @schemas.classproperty - def HOPPERH1003G40GB(cls): - return cls("nvidia-hopper-h100-3g40gb") diff --git a/launch/api_client/model/http_validation_error.py b/launch/api_client/model/http_validation_error.py deleted file mode 100644 index af88fa1b..00000000 --- a/launch/api_client/model/http_validation_error.py +++ /dev/null @@ -1,105 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class HTTPValidationError( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class detail( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ValidationError']: - return ValidationError - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ValidationError'], typing.List['ValidationError']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'detail': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ValidationError': - return super().__getitem__(i) - __annotations__ = { - "detail": detail, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["detail"]) -> MetaOapg.properties.detail: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["detail", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["detail"]) -> typing.Union[MetaOapg.properties.detail, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["detail", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - detail: typing.Union[MetaOapg.properties.detail, list, tuple, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'HTTPValidationError': - return super().__new__( - cls, - *_args, - detail=detail, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.validation_error import ValidationError diff --git a/launch/api_client/model/image_url.py b/launch/api_client/model/image_url.py deleted file mode 100644 index b5c1f45c..00000000 --- a/launch/api_client/model/image_url.py +++ /dev/null @@ -1,128 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ImageUrl( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "url", - } - - class properties: - - - class url( - schemas.StrSchema - ): - - - class MetaOapg: - format = 'uri' - max_length = 65536 - min_length = 1 - - - class detail( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "auto": "AUTO", - "low": "LOW", - "high": "HIGH", - } - - @schemas.classproperty - def AUTO(cls): - return cls("auto") - - @schemas.classproperty - def LOW(cls): - return cls("low") - - @schemas.classproperty - def HIGH(cls): - return cls("high") - __annotations__ = { - "url": url, - "detail": detail, - } - - url: MetaOapg.properties.url - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["detail"]) -> MetaOapg.properties.detail: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["url", "detail", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["detail"]) -> typing.Union[MetaOapg.properties.detail, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["url", "detail", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - url: typing.Union[MetaOapg.properties.url, str, ], - detail: typing.Union[MetaOapg.properties.detail, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ImageUrl': - return super().__new__( - cls, - *_args, - url=url, - detail=detail, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/input_audio.py b/launch/api_client/model/input_audio.py deleted file mode 100644 index 86835ecc..00000000 --- a/launch/api_client/model/input_audio.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class InputAudio( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "data", - "format", - } - - class properties: - data = schemas.StrSchema - - - class format( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "wav": "WAV", - "mp3": "MP3", - } - - @schemas.classproperty - def WAV(cls): - return cls("wav") - - @schemas.classproperty - def MP3(cls): - return cls("mp3") - __annotations__ = { - "data": data, - "format": format, - } - - data: MetaOapg.properties.data - format: MetaOapg.properties.format - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["data"]) -> MetaOapg.properties.data: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["format"]) -> MetaOapg.properties.format: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["data", "format", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["data"]) -> MetaOapg.properties.data: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["format"]) -> MetaOapg.properties.format: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["data", "format", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - data: typing.Union[MetaOapg.properties.data, str, ], - format: typing.Union[MetaOapg.properties.format, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'InputAudio': - return super().__new__( - cls, - *_args, - data=data, - format=format, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/json_schema.py b/launch/api_client/model/json_schema.py deleted file mode 100644 index 092891e8..00000000 --- a/launch/api_client/model/json_schema.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class JsonSchema( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "name", - } - - class properties: - name = schemas.StrSchema - - - class description( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'description': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def schema() -> typing.Type['ResponseFormatJsonSchemaSchema']: - return ResponseFormatJsonSchemaSchema - - - class strict( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'strict': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "name": name, - "description": description, - "schema": schema, - "strict": strict, - } - - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schema"]) -> 'ResponseFormatJsonSchemaSchema': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["strict"]) -> MetaOapg.properties.strict: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "description", "schema", "strict", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schema"]) -> typing.Union['ResponseFormatJsonSchemaSchema', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["strict"]) -> typing.Union[MetaOapg.properties.strict, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "description", "schema", "strict", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - name: typing.Union[MetaOapg.properties.name, str, ], - description: typing.Union[MetaOapg.properties.description, None, str, schemas.Unset] = schemas.unset, - schema: typing.Union['ResponseFormatJsonSchemaSchema', schemas.Unset] = schemas.unset, - strict: typing.Union[MetaOapg.properties.strict, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'JsonSchema': - return super().__new__( - cls, - *_args, - name=name, - description=description, - schema=schema, - strict=strict, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.response_format_json_schema_schema import ( - ResponseFormatJsonSchemaSchema, -) diff --git a/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.py deleted file mode 100644 index b4191e33..00000000 --- 
a/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListDockerImageBatchJobBundleV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "docker_image_batch_job_bundles", - } - - class properties: - - - class docker_image_batch_job_bundles( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['DockerImageBatchJobBundleV1Response']: - return DockerImageBatchJobBundleV1Response - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['DockerImageBatchJobBundleV1Response'], typing.List['DockerImageBatchJobBundleV1Response']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'docker_image_batch_job_bundles': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'DockerImageBatchJobBundleV1Response': - return super().__getitem__(i) - __annotations__ = { - "docker_image_batch_job_bundles": docker_image_batch_job_bundles, - } - - docker_image_batch_job_bundles: MetaOapg.properties.docker_image_batch_job_bundles - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundles"]) -> 
MetaOapg.properties.docker_image_batch_job_bundles: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["docker_image_batch_job_bundles", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundles"]) -> MetaOapg.properties.docker_image_batch_job_bundles: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["docker_image_batch_job_bundles", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - docker_image_batch_job_bundles: typing.Union[MetaOapg.properties.docker_image_batch_job_bundles, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ListDockerImageBatchJobBundleV1Response': - return super().__new__( - cls, - *_args, - docker_image_batch_job_bundles=docker_image_batch_job_bundles, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) diff --git a/launch/api_client/model/list_docker_image_batch_jobs_v1_response.py b/launch/api_client/model/list_docker_image_batch_jobs_v1_response.py deleted file mode 100644 index 1213f5a8..00000000 --- a/launch/api_client/model/list_docker_image_batch_jobs_v1_response.py +++ /dev/null @@ -1,110 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: 
E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListDockerImageBatchJobsV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "jobs", - } - - class properties: - - - class jobs( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['DockerImageBatchJob']: - return DockerImageBatchJob - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['DockerImageBatchJob'], typing.List['DockerImageBatchJob']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'jobs': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'DockerImageBatchJob': - return super().__getitem__(i) - __annotations__ = { - "jobs": jobs, - } - - jobs: MetaOapg.properties.jobs - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["jobs", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["jobs", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - jobs: typing.Union[MetaOapg.properties.jobs, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ListDockerImageBatchJobsV1Response': - return super().__new__( - cls, - *_args, - jobs=jobs, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.docker_image_batch_job import DockerImageBatchJob diff --git a/launch/api_client/model/list_files_response.py b/launch/api_client/model/list_files_response.py deleted file mode 100644 index 8af8b13a..00000000 --- a/launch/api_client/model/list_files_response.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListFilesResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for listing files. 
- """ - - - class MetaOapg: - required = { - "files", - } - - class properties: - - - class files( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['GetFileResponse']: - return GetFileResponse - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['GetFileResponse'], typing.List['GetFileResponse']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'files': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'GetFileResponse': - return super().__getitem__(i) - __annotations__ = { - "files": files, - } - - files: MetaOapg.properties.files - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["files"]) -> MetaOapg.properties.files: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["files", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["files"]) -> MetaOapg.properties.files: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["files", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - files: typing.Union[MetaOapg.properties.files, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ListFilesResponse': - return super().__new__( - cls, - *_args, - files=files, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.get_file_response import GetFileResponse diff --git a/launch/api_client/model/list_fine_tune_job_response.py b/launch/api_client/model/list_fine_tune_job_response.py deleted file mode 100644 index fa6b2afe..00000000 --- a/launch/api_client/model/list_fine_tune_job_response.py +++ /dev/null @@ -1,145 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListFineTunesResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "jobs", - } - - class properties: - class jobs(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["GetFineTuneResponse"]: - return GetFineTuneResponse - - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple["GetFineTuneResponse"], - typing.List["GetFineTuneResponse"], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "jobs": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> "GetFineTuneResponse": - return super().__getitem__(i) - - __annotations__ = { - "jobs": jobs, - } - - jobs: MetaOapg.properties.jobs - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "jobs", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "jobs", - ], - str, - ], - ): - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - jobs: typing.Union[ - MetaOapg.properties.jobs, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListFineTunesResponse": - return super().__new__( - cls, - *_args, - jobs=jobs, - _configuration=_configuration, - **kwargs, - ) - - -from launch.api_client.model.get_fine_tune_response import GetFineTuneResponse diff --git a/launch/api_client/model/list_fine_tunes_response.py b/launch/api_client/model/list_fine_tunes_response.py deleted file mode 100644 index 7243eb4a..00000000 --- a/launch/api_client/model/list_fine_tunes_response.py +++ /dev/null @@ -1,110 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListFineTunesResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "jobs", - } - - class properties: - - - class jobs( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['GetFineTuneResponse']: - return GetFineTuneResponse - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['GetFineTuneResponse'], typing.List['GetFineTuneResponse']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'jobs': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'GetFineTuneResponse': - return super().__getitem__(i) - __annotations__ = { - "jobs": jobs, - } - - jobs: MetaOapg.properties.jobs - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["jobs", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["jobs", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - jobs: typing.Union[MetaOapg.properties.jobs, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ListFineTunesResponse': - return super().__new__( - cls, - *_args, - jobs=jobs, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.get_fine_tune_response import GetFineTuneResponse diff --git a/launch/api_client/model/list_llm_model_endpoints_v1_response.py b/launch/api_client/model/list_llm_model_endpoints_v1_response.py deleted file mode 100644 index 726d98fb..00000000 --- a/launch/api_client/model/list_llm_model_endpoints_v1_response.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListLLMModelEndpointsV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "model_endpoints", - } - - class properties: - - - class model_endpoints( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['GetLLMModelEndpointV1Response']: - return GetLLMModelEndpointV1Response - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['GetLLMModelEndpointV1Response'], typing.List['GetLLMModelEndpointV1Response']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_endpoints': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'GetLLMModelEndpointV1Response': - return super().__getitem__(i) - __annotations__ = { - "model_endpoints": model_endpoints, - } - - model_endpoints: MetaOapg.properties.model_endpoints - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_endpoints", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_endpoints", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_endpoints: typing.Union[MetaOapg.properties.model_endpoints, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ListLLMModelEndpointsV1Response': - return super().__new__( - cls, - *_args, - model_endpoints=model_endpoints, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.get_llm_model_endpoint_v1_response import ( - GetLLMModelEndpointV1Response, -) diff --git a/launch/api_client/model/list_model_bundles_v1_response.py b/launch/api_client/model/list_model_bundles_v1_response.py deleted file mode 100644 index 6e7bb2a8..00000000 --- a/launch/api_client/model/list_model_bundles_v1_response.py +++ /dev/null @@ -1,114 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListModelBundlesV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for listing Model Bundles. 
- """ - - - class MetaOapg: - required = { - "model_bundles", - } - - class properties: - - - class model_bundles( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ModelBundleV1Response']: - return ModelBundleV1Response - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ModelBundleV1Response'], typing.List['ModelBundleV1Response']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_bundles': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ModelBundleV1Response': - return super().__getitem__(i) - __annotations__ = { - "model_bundles": model_bundles, - } - - model_bundles: MetaOapg.properties.model_bundles - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundles", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundles", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_bundles: typing.Union[MetaOapg.properties.model_bundles, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ListModelBundlesV1Response': - return super().__new__( - cls, - *_args, - model_bundles=model_bundles, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.model_bundle_v1_response import ( - ModelBundleV1Response, -) diff --git a/launch/api_client/model/list_model_bundles_v2_response.py b/launch/api_client/model/list_model_bundles_v2_response.py deleted file mode 100644 index 0b6ab300..00000000 --- a/launch/api_client/model/list_model_bundles_v2_response.py +++ /dev/null @@ -1,114 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListModelBundlesV2Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for listing Model Bundles. 
- """ - - - class MetaOapg: - required = { - "model_bundles", - } - - class properties: - - - class model_bundles( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ModelBundleV2Response']: - return ModelBundleV2Response - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['ModelBundleV2Response'], typing.List['ModelBundleV2Response']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_bundles': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'ModelBundleV2Response': - return super().__getitem__(i) - __annotations__ = { - "model_bundles": model_bundles, - } - - model_bundles: MetaOapg.properties.model_bundles - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundles", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundles", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_bundles: typing.Union[MetaOapg.properties.model_bundles, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ListModelBundlesV2Response': - return super().__new__( - cls, - *_args, - model_bundles=model_bundles, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.model_bundle_v2_response import ( - ModelBundleV2Response, -) diff --git a/launch/api_client/model/list_model_endpoints_v1_response.py b/launch/api_client/model/list_model_endpoints_v1_response.py deleted file mode 100644 index 2ddfacd1..00000000 --- a/launch/api_client/model/list_model_endpoints_v1_response.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListModelEndpointsV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "model_endpoints", - } - - class properties: - - - class model_endpoints( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['GetModelEndpointV1Response']: - return GetModelEndpointV1Response - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['GetModelEndpointV1Response'], typing.List['GetModelEndpointV1Response']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_endpoints': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'GetModelEndpointV1Response': - return super().__getitem__(i) - __annotations__ = { - "model_endpoints": model_endpoints, - } - - model_endpoints: MetaOapg.properties.model_endpoints - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_endpoints", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_endpoints", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_endpoints: typing.Union[MetaOapg.properties.model_endpoints, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ListModelEndpointsV1Response': - return super().__new__( - cls, - *_args, - model_endpoints=model_endpoints, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) diff --git a/launch/api_client/model/list_triggers_v1_response.py b/launch/api_client/model/list_triggers_v1_response.py deleted file mode 100644 index 7b9d6456..00000000 --- a/launch/api_client/model/list_triggers_v1_response.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ListTriggersV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "triggers", - } - - class properties: - - - class triggers( - schemas.ListSchema - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['GetTriggerV1Response']: - return GetTriggerV1Response - - def __new__( - cls, - _arg: typing.Union[typing.Tuple['GetTriggerV1Response'], typing.List['GetTriggerV1Response']], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'triggers': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> 'GetTriggerV1Response': - return super().__getitem__(i) - __annotations__ = { - "triggers": triggers, - } - - triggers: MetaOapg.properties.triggers - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triggers"]) -> MetaOapg.properties.triggers: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["triggers", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triggers"]) -> MetaOapg.properties.triggers: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["triggers", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - triggers: typing.Union[MetaOapg.properties.triggers, list, tuple, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ListTriggersV1Response': - return super().__new__( - cls, - *_args, - triggers=triggers, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.get_trigger_v1_response import ( - GetTriggerV1Response, -) diff --git a/launch/api_client/model/llm_fine_tune_event.py b/launch/api_client/model/llm_fine_tune_event.py deleted file mode 100644 index 954606a3..00000000 --- a/launch/api_client/model/llm_fine_tune_event.py +++ /dev/null @@ -1,124 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class LLMFineTuneEvent( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "level", - "message", - } - - class properties: - message = schemas.StrSchema - level = schemas.StrSchema - - - class timestamp( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'timestamp': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "message": message, - "level": level, - "timestamp": timestamp, - } - - level: MetaOapg.properties.level - message: MetaOapg.properties.message - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["level"]) -> MetaOapg.properties.level: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["message", "level", "timestamp", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["level"]) -> MetaOapg.properties.level: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["timestamp"]) -> typing.Union[MetaOapg.properties.timestamp, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["message", "level", "timestamp", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - level: typing.Union[MetaOapg.properties.level, str, ], - message: typing.Union[MetaOapg.properties.message, str, ], - timestamp: typing.Union[MetaOapg.properties.timestamp, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'LLMFineTuneEvent': - return super().__new__( - cls, - *_args, - level=level, - message=message, - timestamp=timestamp, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/llm_inference_framework.py b/launch/api_client/model/llm_inference_framework.py deleted file mode 100644 index cd6c0c75..00000000 --- a/launch/api_client/model/llm_inference_framework.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class LLMInferenceFramework( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - enum_value_to_name = { - "deepspeed": "DEEPSPEED", - "text_generation_inference": "TEXT_GENERATION_INFERENCE", - "vllm": "VLLM", - "lightllm": "LIGHTLLM", - "tensorrt_llm": "TENSORRT_LLM", - "sglang": "SGLANG", - } - - @schemas.classproperty - def DEEPSPEED(cls): - return cls("deepspeed") - - @schemas.classproperty - def TEXT_GENERATION_INFERENCE(cls): - return cls("text_generation_inference") - - @schemas.classproperty - def VLLM(cls): - return cls("vllm") - - @schemas.classproperty - def LIGHTLLM(cls): - return cls("lightllm") - - @schemas.classproperty - def TENSORRT_LLM(cls): - return cls("tensorrt_llm") - - @schemas.classproperty - def SGLANG(cls): - return cls("sglang") diff --git a/launch/api_client/model/llm_source.py b/launch/api_client/model/llm_source.py deleted file mode 100644 index d2fa2586..00000000 --- a/launch/api_client/model/llm_source.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class LLMSource( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - enum_value_to_name = { - "hugging_face": "HUGGING_FACE", - } - - @schemas.classproperty - def HUGGING_FACE(cls): - return cls("hugging_face") diff --git a/launch/api_client/model/logprobs.py b/launch/api_client/model/logprobs.py deleted file mode 100644 index ba8b60ae..00000000 --- a/launch/api_client/model/logprobs.py +++ /dev/null @@ -1,151 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Logprobs( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "refusal", - "content", - } - - class properties: - - - class content( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionTokenLogprob']: - return ChatCompletionTokenLogprob - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class refusal( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['ChatCompletionTokenLogprob']: - return ChatCompletionTokenLogprob - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'refusal': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "content": content, - "refusal": refusal, - } - - refusal: MetaOapg.properties.refusal - content: MetaOapg.properties.content - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "refusal", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "refusal", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - refusal: typing.Union[MetaOapg.properties.refusal, list, tuple, None, ], - content: typing.Union[MetaOapg.properties.content, list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Logprobs': - return super().__new__( - cls, - *_args, - refusal=refusal, - content=content, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.chat_completion_token_logprob import ( - ChatCompletionTokenLogprob, -) diff --git a/launch/api_client/model/logprobs2.py b/launch/api_client/model/logprobs2.py deleted file mode 100644 index 95390954..00000000 --- a/launch/api_client/model/logprobs2.py +++ /dev/null @@ -1,228 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Logprobs2( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class text_offset( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.IntSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'text_offset': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class token_logprobs( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.NumberSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'token_logprobs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokens( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class top_logprobs( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - - - class items( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.NumberSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, 
decimal.Decimal, int, float, ], - ) -> 'items': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'top_logprobs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "text_offset": text_offset, - "token_logprobs": token_logprobs, - "tokens": tokens, - "top_logprobs": top_logprobs, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["text_offset"]) -> MetaOapg.properties.text_offset: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["token_logprobs"]) -> MetaOapg.properties.token_logprobs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokens"]) -> MetaOapg.properties.tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["text_offset", "token_logprobs", "tokens", "top_logprobs", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["text_offset"]) -> typing.Union[MetaOapg.properties.text_offset, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["token_logprobs"]) -> typing.Union[MetaOapg.properties.token_logprobs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokens"]) -> typing.Union[MetaOapg.properties.tokens, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["top_logprobs"]) -> typing.Union[MetaOapg.properties.top_logprobs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["text_offset", "token_logprobs", "tokens", "top_logprobs", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - text_offset: typing.Union[MetaOapg.properties.text_offset, list, tuple, None, schemas.Unset] = schemas.unset, - token_logprobs: typing.Union[MetaOapg.properties.token_logprobs, list, tuple, None, schemas.Unset] = schemas.unset, - tokens: typing.Union[MetaOapg.properties.tokens, list, tuple, None, schemas.Unset] = schemas.unset, - top_logprobs: typing.Union[MetaOapg.properties.top_logprobs, list, tuple, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'Logprobs2': - return super().__new__( - cls, - *_args, - text_offset=text_offset, - token_logprobs=token_logprobs, - tokens=tokens, - top_logprobs=top_logprobs, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/metadata.py b/launch/api_client/model/metadata.py deleted file mode 100644 index ea645e23..00000000 --- a/launch/api_client/model/metadata.py +++ /dev/null @@ -1,61 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 
-import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'Metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/model_bundle_environment_params.py b/launch/api_client/model/model_bundle_environment_params.py deleted file mode 100644 index a182d03e..00000000 --- a/launch/api_client/model/model_bundle_environment_params.py +++ /dev/null @@ -1,209 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # 
noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelBundleEnvironmentParams( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the Model Bundle environment parameters. Being an -entity-layer class, it should be a plain data object. - """ - - - class MetaOapg: - required = { - "framework_type", - } - - class properties: - - @staticmethod - def framework_type() -> typing.Type['ModelBundleFrameworkType']: - return ModelBundleFrameworkType - - - class pytorch_image_tag( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'pytorch_image_tag': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tensorflow_version( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tensorflow_version': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ecr_repo( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ecr_repo': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class image_tag( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'image_tag': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - 
__annotations__ = { - "framework_type": framework_type, - "pytorch_image_tag": pytorch_image_tag, - "tensorflow_version": tensorflow_version, - "ecr_repo": ecr_repo, - "image_tag": image_tag, - } - - framework_type: 'ModelBundleFrameworkType' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> 'ModelBundleFrameworkType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["pytorch_image_tag"]) -> MetaOapg.properties.pytorch_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tensorflow_version"]) -> MetaOapg.properties.tensorflow_version: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ecr_repo"]) -> MetaOapg.properties.ecr_repo: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["framework_type", "pytorch_image_tag", "tensorflow_version", "ecr_repo", "image_tag", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> 'ModelBundleFrameworkType': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["pytorch_image_tag"]) -> typing.Union[MetaOapg.properties.pytorch_image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tensorflow_version"]) -> typing.Union[MetaOapg.properties.tensorflow_version, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ecr_repo"]) -> typing.Union[MetaOapg.properties.ecr_repo, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> typing.Union[MetaOapg.properties.image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["framework_type", "pytorch_image_tag", "tensorflow_version", "ecr_repo", "image_tag", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - framework_type: 'ModelBundleFrameworkType', - pytorch_image_tag: typing.Union[MetaOapg.properties.pytorch_image_tag, None, str, schemas.Unset] = schemas.unset, - tensorflow_version: typing.Union[MetaOapg.properties.tensorflow_version, None, str, schemas.Unset] = schemas.unset, - ecr_repo: typing.Union[MetaOapg.properties.ecr_repo, None, str, schemas.Unset] = schemas.unset, - image_tag: typing.Union[MetaOapg.properties.image_tag, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ModelBundleEnvironmentParams': - return super().__new__( - cls, - *_args, - framework_type=framework_type, - pytorch_image_tag=pytorch_image_tag, - tensorflow_version=tensorflow_version, - ecr_repo=ecr_repo, - image_tag=image_tag, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.model_bundle_framework_type import ( - ModelBundleFrameworkType, -) diff --git a/launch/api_client/model/model_bundle_framework_type.py b/launch/api_client/model/model_bundle_framework_type.py deleted file mode 100644 index 12521da8..00000000 --- a/launch/api_client/model/model_bundle_framework_type.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided 
(generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelBundleFrameworkType( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - The canonical list of possible machine learning frameworks of Model Bundles. - """ - - - class MetaOapg: - enum_value_to_name = { - "pytorch": "PYTORCH", - "tensorflow": "TENSORFLOW", - "custom_base_image": "CUSTOM_BASE_IMAGE", - } - - @schemas.classproperty - def PYTORCH(cls): - return cls("pytorch") - - @schemas.classproperty - def TENSORFLOW(cls): - return cls("tensorflow") - - @schemas.classproperty - def CUSTOM_BASE_IMAGE(cls): - return cls("custom_base_image") diff --git a/launch/api_client/model/model_bundle_order_by.py b/launch/api_client/model/model_bundle_order_by.py deleted file mode 100644 index 42a5c01c..00000000 --- a/launch/api_client/model/model_bundle_order_by.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import 
typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelBundleOrderBy( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - The canonical list of possible orderings of Model Bundles. - """ - - - class MetaOapg: - enum_value_to_name = { - "newest": "NEWEST", - "oldest": "OLDEST", - } - - @schemas.classproperty - def NEWEST(cls): - return cls("newest") - - @schemas.classproperty - def OLDEST(cls): - return cls("oldest") diff --git a/launch/api_client/model/model_bundle_packaging_type.py b/launch/api_client/model/model_bundle_packaging_type.py deleted file mode 100644 index a67cfd36..00000000 --- a/launch/api_client/model/model_bundle_packaging_type.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelBundlePackagingType( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - The canonical list of possible packaging types for Model Bundles. - -These values broadly determine how the model endpoint will obtain its code & dependencies. 
- """ - - - class MetaOapg: - enum_value_to_name = { - "cloudpickle": "CLOUDPICKLE", - "zip": "ZIP", - "lira": "LIRA", - } - - @schemas.classproperty - def CLOUDPICKLE(cls): - return cls("cloudpickle") - - @schemas.classproperty - def ZIP(cls): - return cls("zip") - - @schemas.classproperty - def LIRA(cls): - return cls("lira") diff --git a/launch/api_client/model/model_bundle_v1_response.py b/launch/api_client/model/model_bundle_v1_response.py deleted file mode 100644 index 561665b8..00000000 --- a/launch/api_client/model/model_bundle_v1_response.py +++ /dev/null @@ -1,337 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelBundleV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for a single Model Bundle. 
- """ - - - class MetaOapg: - required = { - "metadata", - "requirements", - "model_artifact_ids", - "packaging_type", - "name", - "created_at", - "location", - "id", - "env_params", - } - - class properties: - id = schemas.StrSchema - name = schemas.StrSchema - location = schemas.StrSchema - - - class requirements( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'requirements': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - @staticmethod - def env_params() -> typing.Type['ModelBundleEnvironmentParams']: - return ModelBundleEnvironmentParams - - @staticmethod - def packaging_type() -> typing.Type['ModelBundlePackagingType']: - return ModelBundlePackagingType - - - class metadata( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - created_at = schemas.DateTimeSchema - - - class model_artifact_ids( - schemas.ListSchema - ): - 
- - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_artifact_ids': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class app_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'app_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class schema_location( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'schema_location': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "id": id, - "name": name, - "location": location, - "requirements": requirements, - "env_params": env_params, - "packaging_type": packaging_type, - "metadata": metadata, - "created_at": created_at, - "model_artifact_ids": 
model_artifact_ids, - "app_config": app_config, - "schema_location": schema_location, - } - - metadata: MetaOapg.properties.metadata - requirements: MetaOapg.properties.requirements - model_artifact_ids: MetaOapg.properties.model_artifact_ids - packaging_type: 'ModelBundlePackagingType' - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - location: MetaOapg.properties.location - id: MetaOapg.properties.id - env_params: 'ModelBundleEnvironmentParams' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env_params"]) -> 'ModelBundleEnvironmentParams': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["packaging_type"]) -> 'ModelBundlePackagingType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_artifact_ids"]) -> MetaOapg.properties.model_artifact_ids: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... 
- - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "location", "requirements", "env_params", "packaging_type", "metadata", "created_at", "model_artifact_ids", "app_config", "schema_location", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env_params"]) -> 'ModelBundleEnvironmentParams': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["packaging_type"]) -> 'ModelBundlePackagingType': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_artifact_ids"]) -> MetaOapg.properties.model_artifact_ids: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["app_config"]) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schema_location"]) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "location", "requirements", "env_params", "packaging_type", "metadata", "created_at", "model_artifact_ids", "app_config", "schema_location", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], - requirements: typing.Union[MetaOapg.properties.requirements, list, tuple, ], - model_artifact_ids: typing.Union[MetaOapg.properties.model_artifact_ids, list, tuple, ], - packaging_type: 'ModelBundlePackagingType', - name: typing.Union[MetaOapg.properties.name, str, ], - created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], - location: typing.Union[MetaOapg.properties.location, str, ], - id: typing.Union[MetaOapg.properties.id, str, ], - env_params: 'ModelBundleEnvironmentParams', - app_config: typing.Union[MetaOapg.properties.app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - schema_location: typing.Union[MetaOapg.properties.schema_location, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ModelBundleV1Response': - return super().__new__( - cls, - *_args, - metadata=metadata, - requirements=requirements, - model_artifact_ids=model_artifact_ids, - packaging_type=packaging_type, - name=name, - created_at=created_at, - location=location, - id=id, - env_params=env_params, - app_config=app_config, - schema_location=schema_location, - _configuration=_configuration, - **kwargs, - ) - -from 
launch.api_client.model.model_bundle_environment_params import ( - ModelBundleEnvironmentParams, -) -from launch.api_client.model.model_bundle_packaging_type import ( - ModelBundlePackagingType, -) diff --git a/launch/api_client/model/model_bundle_v2_response.py b/launch/api_client/model/model_bundle_v2_response.py deleted file mode 100644 index 3d23be2d..00000000 --- a/launch/api_client/model/model_bundle_v2_response.py +++ /dev/null @@ -1,275 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelBundleV2Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for a single Model Bundle. 
- """ - - - class MetaOapg: - required = { - "flavor", - "metadata", - "model_artifact_ids", - "name", - "created_at", - "id", - } - - class properties: - id = schemas.StrSchema - name = schemas.StrSchema - - - class metadata( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - created_at = schemas.DateTimeSchema - - - class model_artifact_ids( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_artifact_ids': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class flavor( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CloudpickleArtifactFlavor, - ZipArtifactFlavor, - RunnableImageFlavor, - StreamingEnhancedRunnableImageFlavor, - TritonEnhancedRunnableImageFlavor, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'flavor': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class schema_location( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'schema_location': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "id": id, - "name": name, - "metadata": metadata, - "created_at": created_at, - "model_artifact_ids": model_artifact_ids, - "flavor": flavor, - "schema_location": schema_location, - } - - flavor: MetaOapg.properties.flavor - metadata: MetaOapg.properties.metadata - model_artifact_ids: MetaOapg.properties.model_artifact_ids - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - id: MetaOapg.properties.id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_artifact_ids"]) -> MetaOapg.properties.model_artifact_ids: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "metadata", "created_at", "model_artifact_ids", "flavor", "schema_location", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_artifact_ids"]) -> MetaOapg.properties.model_artifact_ids: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schema_location"]) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "metadata", "created_at", "model_artifact_ids", "flavor", "schema_location", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - flavor: typing.Union[MetaOapg.properties.flavor, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], - model_artifact_ids: typing.Union[MetaOapg.properties.model_artifact_ids, list, tuple, ], - name: typing.Union[MetaOapg.properties.name, str, ], - created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], - id: typing.Union[MetaOapg.properties.id, str, ], - schema_location: typing.Union[MetaOapg.properties.schema_location, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ModelBundleV2Response': - return super().__new__( - cls, - *_args, - flavor=flavor, - metadata=metadata, - model_artifact_ids=model_artifact_ids, - name=name, - created_at=created_at, - id=id, - schema_location=schema_location, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.cloudpickle_artifact_flavor import ( - CloudpickleArtifactFlavor, -) -from launch.api_client.model.runnable_image_flavor import RunnableImageFlavor -from launch.api_client.model.streaming_enhanced_runnable_image_flavor import ( - StreamingEnhancedRunnableImageFlavor, -) -from launch.api_client.model.triton_enhanced_runnable_image_flavor import ( - 
TritonEnhancedRunnableImageFlavor, -) -from launch.api_client.model.zip_artifact_flavor import ZipArtifactFlavor diff --git a/launch/api_client/model/model_download_request.py b/launch/api_client/model/model_download_request.py deleted file mode 100644 index 6b0dbca7..00000000 --- a/launch/api_client/model/model_download_request.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelDownloadRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "model_name", - } - - class properties: - model_name = schemas.StrSchema - - - class download_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'download_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "model_name": model_name, - "download_format": download_format, - } - - model_name: MetaOapg.properties.model_name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["download_format"]) -> MetaOapg.properties.download_format: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_name", "download_format", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["download_format"]) -> typing.Union[MetaOapg.properties.download_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_name", "download_format", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_name: typing.Union[MetaOapg.properties.model_name, str, ], - download_format: typing.Union[MetaOapg.properties.download_format, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ModelDownloadRequest': - return super().__new__( - cls, - *_args, - model_name=model_name, - download_format=download_format, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/model_download_response.py b/launch/api_client/model/model_download_response.py deleted file mode 100644 index 2f631380..00000000 --- a/launch/api_client/model/model_download_response.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by 
Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelDownloadResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "urls", - } - - class properties: - - - class urls( - schemas.DictSchema - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'urls': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "urls": urls, - } - - urls: MetaOapg.properties.urls - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["urls"]) -> MetaOapg.properties.urls: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["urls", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["urls"]) -> MetaOapg.properties.urls: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["urls", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - urls: typing.Union[MetaOapg.properties.urls, dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ModelDownloadResponse': - return super().__new__( - cls, - *_args, - urls=urls, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/model_endpoint_deployment_state.py b/launch/api_client/model/model_endpoint_deployment_state.py deleted file mode 100644 index db50ffc6..00000000 --- a/launch/api_client/model/model_endpoint_deployment_state.py +++ /dev/null @@ -1,203 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelEndpointDeploymentState( - schemas.DictSchema -): 
- """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the deployment settings related to a Model Endpoint. - """ - - - class MetaOapg: - required = { - "max_workers", - "min_workers", - "concurrent_requests_per_worker", - "per_worker", - } - - class properties: - - - class min_workers( - schemas.IntSchema - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - class max_workers( - schemas.IntSchema - ): - - - class MetaOapg: - inclusive_minimum = 0 - per_worker = schemas.IntSchema - concurrent_requests_per_worker = schemas.IntSchema - - - class available_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'available_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class unavailable_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'unavailable_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "concurrent_requests_per_worker": concurrent_requests_per_worker, - "available_workers": available_workers, - "unavailable_workers": unavailable_workers, - } - - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - concurrent_requests_per_worker: MetaOapg.properties.concurrent_requests_per_worker - per_worker: MetaOapg.properties.per_worker 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["available_workers"]) -> MetaOapg.properties.available_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["unavailable_workers"]) -> MetaOapg.properties.unavailable_workers: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["min_workers", "max_workers", "per_worker", "concurrent_requests_per_worker", "available_workers", "unavailable_workers", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["available_workers"]) -> typing.Union[MetaOapg.properties.available_workers, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["unavailable_workers"]) -> typing.Union[MetaOapg.properties.unavailable_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["min_workers", "max_workers", "per_worker", "concurrent_requests_per_worker", "available_workers", "unavailable_workers", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], - concurrent_requests_per_worker: typing.Union[MetaOapg.properties.concurrent_requests_per_worker, decimal.Decimal, int, ], - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], - available_workers: typing.Union[MetaOapg.properties.available_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - unavailable_workers: typing.Union[MetaOapg.properties.unavailable_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ModelEndpointDeploymentState': - return super().__new__( - cls, - *_args, - max_workers=max_workers, - min_workers=min_workers, - concurrent_requests_per_worker=concurrent_requests_per_worker, - per_worker=per_worker, - available_workers=available_workers, - unavailable_workers=unavailable_workers, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/model_endpoint_order_by.py b/launch/api_client/model/model_endpoint_order_by.py deleted file mode 
100644 index a14d1946..00000000 --- a/launch/api_client/model/model_endpoint_order_by.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelEndpointOrderBy( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - The canonical list of possible orderings of Model Bundles. - """ - - - class MetaOapg: - enum_value_to_name = { - "newest": "NEWEST", - "oldest": "OLDEST", - "alphabetical": "ALPHABETICAL", - } - - @schemas.classproperty - def NEWEST(cls): - return cls("newest") - - @schemas.classproperty - def OLDEST(cls): - return cls("oldest") - - @schemas.classproperty - def ALPHABETICAL(cls): - return cls("alphabetical") diff --git a/launch/api_client/model/model_endpoint_resource_state.py b/launch/api_client/model/model_endpoint_resource_state.py deleted file mode 100644 index a613f5af..00000000 --- a/launch/api_client/model/model_endpoint_resource_state.py +++ /dev/null @@ -1,317 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import 
typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelEndpointResourceState( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the resource settings per worker of a Model Endpoint. -Note: in the multinode case, there are multiple "nodes" per "worker". -"Nodes" is analogous to a single k8s pod that may take up all the GPUs on a single machine. -"Workers" is the smallest unit that a request can be made to, and consists of one leader "node" and -multiple follower "nodes" (named "worker" in the k8s LeaderWorkerSet definition). -cpus/gpus/memory/storage are per-node, thus the total consumption by a "worker" -is cpus/gpus/etc. multiplied by nodes_per_worker. - """ - - - class MetaOapg: - required = { - "memory", - "cpus", - "gpus", - "nodes_per_worker", - } - - class properties: - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntSchema - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntSchema - ): - - - class MetaOapg: - inclusive_minimum = 1 - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "nodes_per_worker": nodes_per_worker, - "gpu_type": gpu_type, - "storage": storage, - "optimize_costs": optimize_costs, - } - - memory: MetaOapg.properties.memory - cpus: MetaOapg.properties.cpus - gpus: MetaOapg.properties.gpus - nodes_per_worker: MetaOapg.properties.nodes_per_worker - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["cpus", "gpus", "memory", "nodes_per_worker", "gpu_type", "storage", "optimize_costs", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cpus", "gpus", "memory", "nodes_per_worker", "gpu_type", "storage", "optimize_costs", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, ], - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, decimal.Decimal, int, ], - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ModelEndpointResourceState': - return super().__new__( - cls, - *_args, - memory=memory, - cpus=cpus, - gpus=gpus, - nodes_per_worker=nodes_per_worker, - gpu_type=gpu_type, - storage=storage, - optimize_costs=optimize_costs, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/model_endpoint_status.py b/launch/api_client/model/model_endpoint_status.py deleted file mode 100644 index 
7623e0a0..00000000 --- a/launch/api_client/model/model_endpoint_status.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelEndpointStatus( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - enum_value_to_name = { - "READY": "READY", - "UPDATE_PENDING": "UPDATE_PENDING", - "UPDATE_IN_PROGRESS": "UPDATE_IN_PROGRESS", - "UPDATE_FAILED": "UPDATE_FAILED", - "DELETE_IN_PROGRESS": "DELETE_IN_PROGRESS", - } - - @schemas.classproperty - def READY(cls): - return cls("READY") - - @schemas.classproperty - def UPDATE_PENDING(cls): - return cls("UPDATE_PENDING") - - @schemas.classproperty - def UPDATE_IN_PROGRESS(cls): - return cls("UPDATE_IN_PROGRESS") - - @schemas.classproperty - def UPDATE_FAILED(cls): - return cls("UPDATE_FAILED") - - @schemas.classproperty - def DELETE_IN_PROGRESS(cls): - return cls("DELETE_IN_PROGRESS") diff --git a/launch/api_client/model/model_endpoint_type.py b/launch/api_client/model/model_endpoint_type.py deleted file mode 100644 index 8122d04d..00000000 --- a/launch/api_client/model/model_endpoint_type.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of 
the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ModelEndpointType( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - enum_value_to_name = { - "async": "ASYNC", - "sync": "SYNC", - "streaming": "STREAMING", - } - - @schemas.classproperty - def ASYNC(cls): - return cls("async") - - @schemas.classproperty - def SYNC(cls): - return cls("sync") - - @schemas.classproperty - def STREAMING(cls): - return cls("streaming") diff --git a/launch/api_client/model/parallel_tool_calls.py b/launch/api_client/model/parallel_tool_calls.py deleted file mode 100644 index 0649343d..00000000 --- a/launch/api_client/model/parallel_tool_calls.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - -ParallelToolCalls = schemas.BoolSchema diff --git a/launch/api_client/model/prediction_content.py b/launch/api_client/model/prediction_content.py deleted file mode 100644 index cb956908..00000000 
--- a/launch/api_client/model/prediction_content.py +++ /dev/null @@ -1,149 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class PredictionContent( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "type", - "content", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "content": "CONTENT", - } - - @schemas.classproperty - def CONTENT(cls): - return cls("content") - - - class content( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - Content8, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'content': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "type": type, - "content": content, - } - - type: MetaOapg.properties.type - content: MetaOapg.properties.content - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "content", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "content", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - type: typing.Union[MetaOapg.properties.type, str, ], - content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'PredictionContent': - return super().__new__( - cls, - *_args, - type=type, - content=content, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.content8 import Content8 diff --git a/launch/api_client/model/prompt.py b/launch/api_client/model/prompt.py deleted file mode 100644 index 7a7c3a52..00000000 --- a/launch/api_client/model/prompt.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Prompt( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- - The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - -Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - - """ - - - class MetaOapg: - items = schemas.IntSchema - min_items = 1 - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'Prompt': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) diff --git a/launch/api_client/model/prompt1.py b/launch/api_client/model/prompt1.py deleted file mode 100644 index 62168649..00000000 --- a/launch/api_client/model/prompt1.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Prompt1( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - -Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. 
- - """ - - - class MetaOapg: - - @staticmethod - def items() -> typing.Type['Prompt1Item']: - return Prompt1Item - min_items = 1 - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'Prompt1': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - -from launch.api_client.model.prompt1_item import Prompt1Item diff --git a/launch/api_client/model/prompt1_item.py b/launch/api_client/model/prompt1_item.py deleted file mode 100644 index 287b2355..00000000 --- a/launch/api_client/model/prompt1_item.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Prompt1Item( - schemas.ListSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - min_items = 1 - items = schemas.IntSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, decimal.Decimal, int, ]], typing.List[typing.Union[MetaOapg.items, decimal.Decimal, int, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'Prompt1Item': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) diff --git a/launch/api_client/model/prompt_tokens_details.py b/launch/api_client/model/prompt_tokens_details.py deleted file mode 100644 index 3f253fa9..00000000 --- a/launch/api_client/model/prompt_tokens_details.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class PromptTokensDetails( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - class properties: - audio_tokens = schemas.IntSchema - cached_tokens = schemas.IntSchema - __annotations__ = { - "audio_tokens": audio_tokens, - "cached_tokens": cached_tokens, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["audio_tokens"]) -> MetaOapg.properties.audio_tokens: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cached_tokens"]) -> MetaOapg.properties.cached_tokens: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["audio_tokens", "cached_tokens", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["audio_tokens"]) -> typing.Union[MetaOapg.properties.audio_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cached_tokens"]) -> typing.Union[MetaOapg.properties.cached_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["audio_tokens", "cached_tokens", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - audio_tokens: typing.Union[MetaOapg.properties.audio_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, - cached_tokens: typing.Union[MetaOapg.properties.cached_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'PromptTokensDetails': - return super().__new__( - cls, - *_args, - audio_tokens=audio_tokens, - cached_tokens=cached_tokens, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/pytorch_framework.py b/launch/api_client/model/pytorch_framework.py deleted file mode 100644 index d5e96a8e..00000000 --- a/launch/api_client/model/pytorch_framework.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: 
utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class PytorchFramework( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for a Pytorch framework specification. - """ - - - class MetaOapg: - required = { - "pytorch_image_tag", - "framework_type", - } - - class properties: - - - class framework_type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "pytorch": "PYTORCH", - } - - @schemas.classproperty - def PYTORCH(cls): - return cls("pytorch") - pytorch_image_tag = schemas.StrSchema - __annotations__ = { - "framework_type": framework_type, - "pytorch_image_tag": pytorch_image_tag, - } - - pytorch_image_tag: MetaOapg.properties.pytorch_image_tag - framework_type: MetaOapg.properties.framework_type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["pytorch_image_tag"]) -> MetaOapg.properties.pytorch_image_tag: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["framework_type", "pytorch_image_tag", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["pytorch_image_tag"]) -> MetaOapg.properties.pytorch_image_tag: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["framework_type", "pytorch_image_tag", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - pytorch_image_tag: typing.Union[MetaOapg.properties.pytorch_image_tag, str, ], - framework_type: typing.Union[MetaOapg.properties.framework_type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'PytorchFramework': - return super().__new__( - cls, - *_args, - pytorch_image_tag=pytorch_image_tag, - framework_type=framework_type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/quantization.py b/launch/api_client/model/quantization.py deleted file mode 100644 index f9564c4a..00000000 --- a/launch/api_client/model/quantization.py +++ /dev/null @@ -1,49 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: 
F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class Quantization( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - enum_value_to_name = { - "bitsandbytes": "BITSANDBYTES", - "awq": "AWQ", - } - - @schemas.classproperty - def BITSANDBYTES(cls): - return cls("bitsandbytes") - - @schemas.classproperty - def AWQ(cls): - return cls("awq") diff --git a/launch/api_client/model/reasoning_effort.py b/launch/api_client/model/reasoning_effort.py deleted file mode 100644 index ae2994da..00000000 --- a/launch/api_client/model/reasoning_effort.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ReasoningEffort( - schemas.EnumBase, - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - **o-series models only** - -Constrains effort on reasoning for -[reasoning models](https://platform.openai.com/docs/guides/reasoning). -Currently supported values are `low`, `medium`, and `high`. 
Reducing -reasoning effort can result in faster responses and fewer tokens used -on reasoning in a response. - - """ - - - class MetaOapg: - enum_value_to_name = { - "low": "LOW", - "medium": "MEDIUM", - "high": "HIGH", - } - - @schemas.classproperty - def LOW(cls): - return cls("low") - - @schemas.classproperty - def MEDIUM(cls): - return cls("medium") - - @schemas.classproperty - def HIGH(cls): - return cls("high") - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ReasoningEffort': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) diff --git a/launch/api_client/model/request_schema.py b/launch/api_client/model/request_schema.py deleted file mode 100644 index 48c12333..00000000 --- a/launch/api_client/model/request_schema.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - -RequestSchema = schemas.AnyTypeSchema diff --git a/launch/api_client/model/response_format_json_object.py b/launch/api_client/model/response_format_json_object.py deleted file mode 100644 index 0006c89a..00000000 --- a/launch/api_client/model/response_format_json_object.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - 
Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ResponseFormatJsonObject( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "json_object": "JSON_OBJECT", - } - - @schemas.classproperty - def JSON_OBJECT(cls): - return cls("json_object") - __annotations__ = { - "type": type, - } - - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ResponseFormatJsonObject': - return super().__new__( - cls, - *_args, - type=type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/response_format_json_schema.py b/launch/api_client/model/response_format_json_schema.py deleted file mode 100644 index 6173317a..00000000 --- a/launch/api_client/model/response_format_json_schema.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ResponseFormatJsonSchema( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "json_schema", - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "json_schema": "JSON_SCHEMA", - } - - @schemas.classproperty - def JSON_SCHEMA(cls): - return cls("json_schema") - - @staticmethod - def json_schema() -> typing.Type['JsonSchema']: - return JsonSchema - __annotations__ = { - "type": type, - "json_schema": json_schema, - } - - json_schema: 'JsonSchema' - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["json_schema"]) -> 'JsonSchema': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "json_schema", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["json_schema"]) -> 'JsonSchema': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "json_schema", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - json_schema: 'JsonSchema', - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ResponseFormatJsonSchema': - return super().__new__( - cls, - *_args, - json_schema=json_schema, - type=type, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.json_schema import JsonSchema diff --git a/launch/api_client/model/response_format_json_schema_schema.py b/launch/api_client/model/response_format_json_schema_schema.py deleted file mode 100644 index 9f7aec5c..00000000 --- a/launch/api_client/model/response_format_json_schema_schema.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ResponseFormatJsonSchemaSchema( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'ResponseFormatJsonSchemaSchema': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/response_format_text.py b/launch/api_client/model/response_format_text.py deleted file mode 100644 index f9a2c24e..00000000 --- a/launch/api_client/model/response_format_text.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ResponseFormatText( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "text": "TEXT", - } - - @schemas.classproperty - def TEXT(cls): - return cls("text") - __annotations__ = { - "type": type, - } - - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ResponseFormatText': - return super().__new__( - cls, - *_args, - type=type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/response_modalities.py b/launch/api_client/model/response_modalities.py deleted file mode 100644 index 7e7a7b41..00000000 --- a/launch/api_client/model/response_modalities.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - 
The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ResponseModalities( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Output types that you would like the model to generate. -Most models are capable of generating text, which is the default: - -`["text"]` - -The `gpt-4o-audio-preview` model can also be used to -[generate audio](/docs/guides/audio). To request that this model generate -both text and audio responses, you can use: - -`["text", "audio"]` - - """ - - - class MetaOapg: - - - class items( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "text": "TEXT", - "audio": "AUDIO", - } - - @schemas.classproperty - def TEXT(cls): - return cls("text") - - @schemas.classproperty - def AUDIO(cls): - return cls("audio") - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ResponseModalities': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) diff --git a/launch/api_client/model/response_schema.py b/launch/api_client/model/response_schema.py deleted file mode 100644 index ac2a8608..00000000 --- a/launch/api_client/model/response_schema.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of 
the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - -ResponseSchema = schemas.AnyTypeSchema diff --git a/launch/api_client/model/restart_model_endpoint_v1_response.py b/launch/api_client/model/restart_model_endpoint_v1_response.py deleted file mode 100644 index 9ba6e0a8..00000000 --- a/launch/api_client/model/restart_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class RestartModelEndpointV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "restarted", - } - - class properties: - restarted = schemas.BoolSchema - __annotations__ = { - "restarted": restarted, - } - - restarted: MetaOapg.properties.restarted - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["restarted"]) -> MetaOapg.properties.restarted: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["restarted", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["restarted"]) -> MetaOapg.properties.restarted: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["restarted", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - restarted: typing.Union[MetaOapg.properties.restarted, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'RestartModelEndpointV1Response': - return super().__new__( - cls, - *_args, - restarted=restarted, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/runnable_image_flavor.py b/launch/api_client/model/runnable_image_flavor.py deleted file mode 100644 index 46726cb4..00000000 --- a/launch/api_client/model/runnable_image_flavor.py +++ /dev/null @@ -1,425 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class RunnableImageFlavor( - schemas.DictSchema -): - """NOTE: 
This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the Model Bundle flavor of a runnable image. - """ - - - class MetaOapg: - required = { - "flavor", - "protocol", - "tag", - "repository", - "command", - } - - class properties: - repository = schemas.StrSchema - tag = schemas.StrSchema - - - class command( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'command': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class protocol( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "http": "HTTP", - } - - @schemas.classproperty - def HTTP(cls): - return cls("http") - - - class flavor( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "runnable_image": "RUNNABLE_IMAGE", - } - - @schemas.classproperty - def RUNNABLE_IMAGE(cls): - return cls("runnable_image") - predict_route = schemas.StrSchema - healthcheck_route = schemas.StrSchema - - - class env( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'env': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - readiness_initial_delay_seconds = schemas.IntSchema - - - class extra_routes( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'extra_routes': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class routes( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'routes': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class forwarder_type( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'forwarder_type': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class worker_command( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'worker_command': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class worker_env( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'worker_env': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "repository": repository, - "tag": tag, - "command": command, - "protocol": protocol, - "flavor": flavor, - "predict_route": predict_route, - "healthcheck_route": healthcheck_route, - "env": env, - "readiness_initial_delay_seconds": readiness_initial_delay_seconds, - "extra_routes": extra_routes, - "routes": routes, - "forwarder_type": forwarder_type, - "worker_command": worker_command, - "worker_env": worker_env, - } - - flavor: MetaOapg.properties.flavor - protocol: MetaOapg.properties.protocol - tag: MetaOapg.properties.tag - repository: MetaOapg.properties.repository - command: MetaOapg.properties.command - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["healthcheck_route"]) -> MetaOapg.properties.healthcheck_route: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> MetaOapg.properties.readiness_initial_delay_seconds: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["extra_routes"]) -> MetaOapg.properties.extra_routes: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["routes"]) -> MetaOapg.properties.routes: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["forwarder_type"]) -> MetaOapg.properties.forwarder_type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["worker_command"]) -> MetaOapg.properties.worker_command: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["worker_env"]) -> MetaOapg.properties.worker_env: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "command", "protocol", "flavor", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["predict_route"]) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["healthcheck_route"]) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["extra_routes"]) -> typing.Union[MetaOapg.properties.extra_routes, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["routes"]) -> typing.Union[MetaOapg.properties.routes, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["forwarder_type"]) -> typing.Union[MetaOapg.properties.forwarder_type, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["worker_command"]) -> typing.Union[MetaOapg.properties.worker_command, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["worker_env"]) -> typing.Union[MetaOapg.properties.worker_env, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "command", "protocol", "flavor", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - flavor: typing.Union[MetaOapg.properties.flavor, str, ], - protocol: typing.Union[MetaOapg.properties.protocol, str, ], - tag: typing.Union[MetaOapg.properties.tag, str, ], - repository: typing.Union[MetaOapg.properties.repository, str, ], - command: typing.Union[MetaOapg.properties.command, list, tuple, ], - predict_route: typing.Union[MetaOapg.properties.predict_route, str, schemas.Unset] = schemas.unset, - healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - readiness_initial_delay_seconds: typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset] = schemas.unset, - extra_routes: typing.Union[MetaOapg.properties.extra_routes, list, tuple, schemas.Unset] = schemas.unset, - routes: typing.Union[MetaOapg.properties.routes, list, tuple, schemas.Unset] = schemas.unset, - forwarder_type: typing.Union[MetaOapg.properties.forwarder_type, None, str, schemas.Unset] = schemas.unset, - worker_command: typing.Union[MetaOapg.properties.worker_command, list, tuple, None, schemas.Unset] = schemas.unset, - worker_env: typing.Union[MetaOapg.properties.worker_env, dict, 
frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'RunnableImageFlavor': - return super().__new__( - cls, - *_args, - flavor=flavor, - protocol=protocol, - tag=tag, - repository=repository, - command=command, - predict_route=predict_route, - healthcheck_route=healthcheck_route, - env=env, - readiness_initial_delay_seconds=readiness_initial_delay_seconds, - extra_routes=extra_routes, - routes=routes, - forwarder_type=forwarder_type, - worker_command=worker_command, - worker_env=worker_env, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/service_tier.py b/launch/api_client/model/service_tier.py deleted file mode 100644 index dc462fc7..00000000 --- a/launch/api_client/model/service_tier.py +++ /dev/null @@ -1,80 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ServiceTier( - schemas.EnumBase, - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Specifies the latency tier to use for processing the request. 
This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', and the Project is Scale tier enabled, the system - will utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing). - - When not set, the default behavior is 'auto'. - - When this parameter is set, the response body will include the `service_tier` utilized. - - """ - - - class MetaOapg: - enum_value_to_name = { - "auto": "AUTO", - "default": "DEFAULT", - "flex": "FLEX", - } - - @schemas.classproperty - def AUTO(cls): - return cls("auto") - - @schemas.classproperty - def DEFAULT(cls): - return cls("default") - - @schemas.classproperty - def FLEX(cls): - return cls("flex") - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ServiceTier': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) diff --git a/launch/api_client/model/stop_configuration.py b/launch/api_client/model/stop_configuration.py deleted file mode 100644 index d69582b5..00000000 --- a/launch/api_client/model/stop_configuration.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # 
noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class StopConfiguration( - schemas.ComposedSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Not supported with latest reasoning models `o3` and `o4-mini`. - -Up to 4 sequences where the API will stop generating further tokens. The -returned text will not contain the stop sequence. - - """ - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - StopConfiguration1, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'StopConfiguration': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.stop_configuration1 import StopConfiguration1 diff --git a/launch/api_client/model/stop_configuration1.py b/launch/api_client/model/stop_configuration1.py deleted file mode 100644 index fb3241c0..00000000 --- a/launch/api_client/model/stop_configuration1.py +++ 
/dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class StopConfiguration1( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Not supported with latest reasoning models `o3` and `o4-mini`. - -Up to 4 sequences where the API will stop generating further tokens. The -returned text will not contain the stop sequence. 
- - """ - - - class MetaOapg: - items = schemas.StrSchema - max_items = 4 - min_items = 1 - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'StopConfiguration1': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) diff --git a/launch/api_client/model/stream_error.py b/launch/api_client/model/stream_error.py deleted file mode 100644 index 9e2c9c5e..00000000 --- a/launch/api_client/model/stream_error.py +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class StreamError( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Error object for a stream prompt completion task. - """ - - - class MetaOapg: - required = { - "status_code", - "content", - } - - class properties: - status_code = schemas.IntSchema - - @staticmethod - def content() -> typing.Type['StreamErrorContent']: - return StreamErrorContent - __annotations__ = { - "status_code": status_code, - "content": content, - } - - status_code: MetaOapg.properties.status_code - content: 'StreamErrorContent' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> 'StreamErrorContent': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["status_code", "content", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> 'StreamErrorContent': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["status_code", "content", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - status_code: typing.Union[MetaOapg.properties.status_code, decimal.Decimal, int, ], - content: 'StreamErrorContent', - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'StreamError': - return super().__new__( - cls, - *_args, - status_code=status_code, - content=content, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.stream_error_content import StreamErrorContent diff --git a/launch/api_client/model/stream_error_content.py b/launch/api_client/model/stream_error_content.py deleted file mode 100644 index 41b7e854..00000000 --- a/launch/api_client/model/stream_error_content.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of 
the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class StreamErrorContent( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "error", - "timestamp", - } - - class properties: - error = schemas.StrSchema - timestamp = schemas.StrSchema - __annotations__ = { - "error": error, - "timestamp": timestamp, - } - - error: MetaOapg.properties.error - timestamp: MetaOapg.properties.timestamp - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["error"]) -> MetaOapg.properties.error: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["error", "timestamp", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> MetaOapg.properties.error: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["error", "timestamp", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - error: typing.Union[MetaOapg.properties.error, str, ], - timestamp: typing.Union[MetaOapg.properties.timestamp, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'StreamErrorContent': - return super().__new__( - cls, - *_args, - error=error, - timestamp=timestamp, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/streaming_enhanced_runnable_image_flavor.py b/launch/api_client/model/streaming_enhanced_runnable_image_flavor.py deleted file mode 100644 index 1849bea3..00000000 --- a/launch/api_client/model/streaming_enhanced_runnable_image_flavor.py +++ /dev/null @@ -1,467 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class StreamingEnhancedRunnableImageFlavor( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - For deployments that expose a streaming route in a container. 
- """ - - - class MetaOapg: - required = { - "flavor", - "protocol", - "tag", - "repository", - "streaming_command", - } - - class properties: - repository = schemas.StrSchema - tag = schemas.StrSchema - - - class protocol( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "http": "HTTP", - } - - @schemas.classproperty - def HTTP(cls): - return cls("http") - - - class flavor( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "streaming_enhanced_runnable_image": "STREAMING_ENHANCED_RUNNABLE_IMAGE", - } - - @schemas.classproperty - def STREAMING_ENHANCED_RUNNABLE_IMAGE(cls): - return cls("streaming_enhanced_runnable_image") - - - class streaming_command( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'streaming_command': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class command( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'command': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - predict_route = schemas.StrSchema - healthcheck_route = schemas.StrSchema - - - class env( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, 
name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'env': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - readiness_initial_delay_seconds = schemas.IntSchema - - - class extra_routes( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'extra_routes': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class routes( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'routes': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class forwarder_type( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'forwarder_type': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class worker_command( - 
schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'worker_command': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class worker_env( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'worker_env': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - streaming_predict_route = schemas.StrSchema - __annotations__ = { - "repository": repository, - "tag": tag, - "protocol": protocol, - "flavor": flavor, - "streaming_command": streaming_command, - "command": command, - "predict_route": predict_route, - "healthcheck_route": healthcheck_route, - "env": env, - "readiness_initial_delay_seconds": readiness_initial_delay_seconds, - "extra_routes": extra_routes, - "routes": routes, - "forwarder_type": forwarder_type, - "worker_command": worker_command, - "worker_env": worker_env, - "streaming_predict_route": streaming_predict_route, - } - - flavor: MetaOapg.properties.flavor - protocol: MetaOapg.properties.protocol - tag: MetaOapg.properties.tag - repository: MetaOapg.properties.repository - streaming_command: MetaOapg.properties.streaming_command - - @typing.overload - 
def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["streaming_command"]) -> MetaOapg.properties.streaming_command: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["healthcheck_route"]) -> MetaOapg.properties.healthcheck_route: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> MetaOapg.properties.readiness_initial_delay_seconds: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["extra_routes"]) -> MetaOapg.properties.extra_routes: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["routes"]) -> MetaOapg.properties.routes: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["forwarder_type"]) -> MetaOapg.properties.forwarder_type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["worker_command"]) -> MetaOapg.properties.worker_command: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["worker_env"]) -> MetaOapg.properties.worker_env: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["streaming_predict_route"]) -> MetaOapg.properties.streaming_predict_route: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "protocol", "flavor", "streaming_command", "command", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", "streaming_predict_route", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["streaming_command"]) -> MetaOapg.properties.streaming_command: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> typing.Union[MetaOapg.properties.command, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["predict_route"]) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["healthcheck_route"]) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["extra_routes"]) -> typing.Union[MetaOapg.properties.extra_routes, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["routes"]) -> typing.Union[MetaOapg.properties.routes, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["forwarder_type"]) -> typing.Union[MetaOapg.properties.forwarder_type, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["worker_command"]) -> typing.Union[MetaOapg.properties.worker_command, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["worker_env"]) -> typing.Union[MetaOapg.properties.worker_env, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["streaming_predict_route"]) -> typing.Union[MetaOapg.properties.streaming_predict_route, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "protocol", "flavor", "streaming_command", "command", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", "streaming_predict_route", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - flavor: typing.Union[MetaOapg.properties.flavor, str, ], - protocol: typing.Union[MetaOapg.properties.protocol, str, ], - tag: typing.Union[MetaOapg.properties.tag, str, ], - repository: typing.Union[MetaOapg.properties.repository, str, ], - streaming_command: typing.Union[MetaOapg.properties.streaming_command, list, tuple, ], - command: typing.Union[MetaOapg.properties.command, list, tuple, schemas.Unset] = schemas.unset, - predict_route: typing.Union[MetaOapg.properties.predict_route, str, schemas.Unset] = schemas.unset, - healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - readiness_initial_delay_seconds: typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset] = schemas.unset, - extra_routes: typing.Union[MetaOapg.properties.extra_routes, list, tuple, schemas.Unset] = schemas.unset, - routes: typing.Union[MetaOapg.properties.routes, list, tuple, schemas.Unset] = schemas.unset, - forwarder_type: typing.Union[MetaOapg.properties.forwarder_type, None, str, schemas.Unset] = schemas.unset, - worker_command: typing.Union[MetaOapg.properties.worker_command, list, tuple, None, schemas.Unset] = schemas.unset, - worker_env: typing.Union[MetaOapg.properties.worker_env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - streaming_predict_route: 
typing.Union[MetaOapg.properties.streaming_predict_route, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'StreamingEnhancedRunnableImageFlavor': - return super().__new__( - cls, - *_args, - flavor=flavor, - protocol=protocol, - tag=tag, - repository=repository, - streaming_command=streaming_command, - command=command, - predict_route=predict_route, - healthcheck_route=healthcheck_route, - env=env, - readiness_initial_delay_seconds=readiness_initial_delay_seconds, - extra_routes=extra_routes, - routes=routes, - forwarder_type=forwarder_type, - worker_command=worker_command, - worker_env=worker_env, - streaming_predict_route=streaming_predict_route, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/sync_endpoint_predict_v1_request.py b/launch/api_client/model/sync_endpoint_predict_v1_request.py deleted file mode 100644 index dd5f3cb8..00000000 --- a/launch/api_client/model/sync_endpoint_predict_v1_request.py +++ /dev/null @@ -1,281 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class SyncEndpointPredictV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. 
- Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - class properties: - - - class url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - args = schemas.AnyTypeSchema - - - class cloudpickle( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cloudpickle': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - return_pickled = schemas.BoolSchema - - - class destination_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'destination_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class timeout_seconds( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'timeout_seconds': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class num_retries( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_retries': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "url": url, - "args": args, - "cloudpickle": cloudpickle, - "callback_url": callback_url, - "callback_auth": callback_auth, - "return_pickled": return_pickled, - "destination_path": destination_path, - "timeout_seconds": timeout_seconds, - "num_retries": num_retries, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["args"]) -> MetaOapg.properties.args: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cloudpickle"]) -> MetaOapg.properties.cloudpickle: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_url"]) -> MetaOapg.properties.callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["return_pickled"]) -> MetaOapg.properties.return_pickled: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["destination_path"]) -> MetaOapg.properties.destination_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timeout_seconds"]) -> MetaOapg.properties.timeout_seconds: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_retries"]) -> MetaOapg.properties.num_retries: ... 
- - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["url", "args", "cloudpickle", "callback_url", "callback_auth", "return_pickled", "destination_path", "timeout_seconds", "num_retries", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["url"]) -> typing.Union[MetaOapg.properties.url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["args"]) -> typing.Union[MetaOapg.properties.args, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cloudpickle"]) -> typing.Union[MetaOapg.properties.cloudpickle, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["callback_url"]) -> typing.Union[MetaOapg.properties.callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["return_pickled"]) -> typing.Union[MetaOapg.properties.return_pickled, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["destination_path"]) -> typing.Union[MetaOapg.properties.destination_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["timeout_seconds"]) -> typing.Union[MetaOapg.properties.timeout_seconds, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_retries"]) -> typing.Union[MetaOapg.properties.num_retries, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["url", "args", "cloudpickle", "callback_url", "callback_auth", "return_pickled", "destination_path", "timeout_seconds", "num_retries", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - url: typing.Union[MetaOapg.properties.url, None, str, schemas.Unset] = schemas.unset, - args: typing.Union[MetaOapg.properties.args, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - cloudpickle: typing.Union[MetaOapg.properties.cloudpickle, None, str, schemas.Unset] = schemas.unset, - callback_url: typing.Union[MetaOapg.properties.callback_url, None, str, schemas.Unset] = schemas.unset, - callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - return_pickled: typing.Union[MetaOapg.properties.return_pickled, bool, schemas.Unset] = schemas.unset, - destination_path: typing.Union[MetaOapg.properties.destination_path, None, str, schemas.Unset] = schemas.unset, - timeout_seconds: typing.Union[MetaOapg.properties.timeout_seconds, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - num_retries: typing.Union[MetaOapg.properties.num_retries, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'SyncEndpointPredictV1Request': - return super().__new__( - cls, - *_args, - url=url, - args=args, - cloudpickle=cloudpickle, - callback_url=callback_url, - callback_auth=callback_auth, - return_pickled=return_pickled, - destination_path=destination_path, - timeout_seconds=timeout_seconds, - num_retries=num_retries, - 
_configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth diff --git a/launch/api_client/model/sync_endpoint_predict_v1_response.py b/launch/api_client/model/sync_endpoint_predict_v1_response.py deleted file mode 100644 index 94c526fc..00000000 --- a/launch/api_client/model/sync_endpoint_predict_v1_response.py +++ /dev/null @@ -1,156 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class SyncEndpointPredictV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "status", - } - - class properties: - - @staticmethod - def status() -> typing.Type['TaskStatus']: - return TaskStatus - result = schemas.AnyTypeSchema - - - class traceback( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'traceback': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class status_code( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'status_code': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "status": status, - "result": result, - "traceback": traceback, - "status_code": status_code, - } - - status: 'TaskStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'TaskStatus': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["traceback"]) -> MetaOapg.properties.traceback: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["status", "result", "traceback", "status_code", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'TaskStatus': ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["result"]) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["traceback"]) -> typing.Union[MetaOapg.properties.traceback, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status_code"]) -> typing.Union[MetaOapg.properties.status_code, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["status", "result", "traceback", "status_code", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - status: 'TaskStatus', - result: typing.Union[MetaOapg.properties.result, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - traceback: typing.Union[MetaOapg.properties.traceback, None, str, schemas.Unset] = schemas.unset, - status_code: typing.Union[MetaOapg.properties.status_code, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'SyncEndpointPredictV1Response': - return super().__new__( - cls, - *_args, - status=status, - result=result, - traceback=traceback, - status_code=status_code, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.task_status import TaskStatus diff --git a/launch/api_client/model/task_status.py b/launch/api_client/model/task_status.py deleted file mode 100644 index f4a30cae..00000000 --- 
a/launch/api_client/model/task_status.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class TaskStatus( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - enum_value_to_name = { - "PENDING": "PENDING", - "STARTED": "STARTED", - "SUCCESS": "SUCCESS", - "FAILURE": "FAILURE", - "UNDEFINED": "UNDEFINED", - } - - @schemas.classproperty - def PENDING(cls): - return cls("PENDING") - - @schemas.classproperty - def STARTED(cls): - return cls("STARTED") - - @schemas.classproperty - def SUCCESS(cls): - return cls("SUCCESS") - - @schemas.classproperty - def FAILURE(cls): - return cls("FAILURE") - - @schemas.classproperty - def UNDEFINED(cls): - return cls("UNDEFINED") diff --git a/launch/api_client/model/tensorflow_framework.py b/launch/api_client/model/tensorflow_framework.py deleted file mode 100644 index 433503f7..00000000 --- a/launch/api_client/model/tensorflow_framework.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: 
F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class TensorflowFramework( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for a Tensorflow framework specification. - """ - - - class MetaOapg: - required = { - "tensorflow_version", - "framework_type", - } - - class properties: - - - class framework_type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "tensorflow": "TENSORFLOW", - } - - @schemas.classproperty - def TENSORFLOW(cls): - return cls("tensorflow") - tensorflow_version = schemas.StrSchema - __annotations__ = { - "framework_type": framework_type, - "tensorflow_version": tensorflow_version, - } - - tensorflow_version: MetaOapg.properties.tensorflow_version - framework_type: MetaOapg.properties.framework_type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tensorflow_version"]) -> MetaOapg.properties.tensorflow_version: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["framework_type", "tensorflow_version", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tensorflow_version"]) -> MetaOapg.properties.tensorflow_version: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["framework_type", "tensorflow_version", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - tensorflow_version: typing.Union[MetaOapg.properties.tensorflow_version, str, ], - framework_type: typing.Union[MetaOapg.properties.framework_type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'TensorflowFramework': - return super().__new__( - cls, - *_args, - tensorflow_version=tensorflow_version, - framework_type=framework_type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/token_output.py b/launch/api_client/model/token_output.py deleted file mode 100644 index 4c340d96..00000000 --- a/launch/api_client/model/token_output.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class TokenOutput( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Detailed token information. 
- """ - - - class MetaOapg: - required = { - "log_prob", - "token", - } - - class properties: - token = schemas.StrSchema - log_prob = schemas.NumberSchema - __annotations__ = { - "token": token, - "log_prob": log_prob, - } - - log_prob: MetaOapg.properties.log_prob - token: MetaOapg.properties.token - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["log_prob"]) -> MetaOapg.properties.log_prob: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["token", "log_prob", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["log_prob"]) -> MetaOapg.properties.log_prob: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["token", "log_prob", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - log_prob: typing.Union[MetaOapg.properties.log_prob, decimal.Decimal, int, float, ], - token: typing.Union[MetaOapg.properties.token, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'TokenOutput': - return super().__new__( - cls, - *_args, - log_prob=log_prob, - token=token, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/tool_config.py b/launch/api_client/model/tool_config.py deleted file mode 100644 index d3101fd9..00000000 --- a/launch/api_client/model/tool_config.py +++ /dev/null @@ -1,173 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ToolConfig( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Configuration for tool use. -NOTE: this config is highly experimental and signature will change significantly in future iterations. 
- """ - - - class MetaOapg: - required = { - "name", - } - - class properties: - name = schemas.StrSchema - - - class max_iterations( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_iterations': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class execution_timeout_seconds( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'execution_timeout_seconds': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class should_retry_on_error( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'should_retry_on_error': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "name": name, - "max_iterations": max_iterations, - "execution_timeout_seconds": execution_timeout_seconds, - "should_retry_on_error": should_retry_on_error, - } - - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_iterations"]) -> MetaOapg.properties.max_iterations: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["execution_timeout_seconds"]) -> MetaOapg.properties.execution_timeout_seconds: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["should_retry_on_error"]) -> MetaOapg.properties.should_retry_on_error: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "max_iterations", "execution_timeout_seconds", "should_retry_on_error", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_iterations"]) -> typing.Union[MetaOapg.properties.max_iterations, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["execution_timeout_seconds"]) -> typing.Union[MetaOapg.properties.execution_timeout_seconds, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["should_retry_on_error"]) -> typing.Union[MetaOapg.properties.should_retry_on_error, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "max_iterations", "execution_timeout_seconds", "should_retry_on_error", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - name: typing.Union[MetaOapg.properties.name, str, ], - max_iterations: typing.Union[MetaOapg.properties.max_iterations, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - execution_timeout_seconds: typing.Union[MetaOapg.properties.execution_timeout_seconds, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - should_retry_on_error: typing.Union[MetaOapg.properties.should_retry_on_error, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ToolConfig': - return super().__new__( - cls, - *_args, - name=name, - max_iterations=max_iterations, - execution_timeout_seconds=execution_timeout_seconds, - should_retry_on_error=should_retry_on_error, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/top_logprob.py b/launch/api_client/model/top_logprob.py deleted file mode 100644 index f53fc266..00000000 --- a/launch/api_client/model/top_logprob.py +++ /dev/null @@ -1,130 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from 
launch.api_client import schemas # noqa: F401 - - -class TopLogprob( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "logprob", - "bytes", - "token", - } - - class properties: - token = schemas.StrSchema - logprob = schemas.NumberSchema - - - class bytes( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.IntSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'bytes': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "token": token, - "logprob": logprob, - "bytes": bytes, - } - - logprob: MetaOapg.properties.logprob - bytes: MetaOapg.properties.bytes - token: MetaOapg.properties.token - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["logprob"]) -> MetaOapg.properties.logprob: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["bytes"]) -> MetaOapg.properties.bytes: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["token", "logprob", "bytes", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["logprob"]) -> MetaOapg.properties.logprob: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["bytes"]) -> MetaOapg.properties.bytes: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["token", "logprob", "bytes", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - logprob: typing.Union[MetaOapg.properties.logprob, decimal.Decimal, int, float, ], - bytes: typing.Union[MetaOapg.properties.bytes, list, tuple, None, ], - token: typing.Union[MetaOapg.properties.token, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'TopLogprob': - return super().__new__( - cls, - *_args, - logprob=logprob, - bytes=bytes, - token=token, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/triton_enhanced_runnable_image_flavor.py b/launch/api_client/model/triton_enhanced_runnable_image_flavor.py deleted file mode 100644 index d884017b..00000000 --- a/launch/api_client/model/triton_enhanced_runnable_image_flavor.py +++ /dev/null @@ -1,571 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class TritonEnhancedRunnableImageFlavor( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. 
- Ref: https://openapi-generator.tech - - Do not edit the class manually. - - For deployments that require tritonserver running in a container. - """ - - - class MetaOapg: - required = { - "flavor", - "protocol", - "tag", - "repository", - "triton_commit_tag", - "triton_model_repository", - "command", - "triton_num_cpu", - } - - class properties: - repository = schemas.StrSchema - tag = schemas.StrSchema - - - class command( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'command': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class protocol( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "http": "HTTP", - } - - @schemas.classproperty - def HTTP(cls): - return cls("http") - - - class flavor( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "triton_enhanced_runnable_image": "TRITON_ENHANCED_RUNNABLE_IMAGE", - } - - @schemas.classproperty - def TRITON_ENHANCED_RUNNABLE_IMAGE(cls): - return cls("triton_enhanced_runnable_image") - triton_model_repository = schemas.StrSchema - triton_num_cpu = schemas.NumberSchema - triton_commit_tag = schemas.StrSchema - predict_route = schemas.StrSchema - healthcheck_route = schemas.StrSchema - - - class env( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> 
MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'env': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - readiness_initial_delay_seconds = schemas.IntSchema - - - class extra_routes( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'extra_routes': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class routes( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'routes': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class forwarder_type( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'forwarder_type': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class worker_command( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, 
tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'worker_command': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class worker_env( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'worker_env': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class triton_model_replicas( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'triton_model_replicas': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class triton_storage( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'triton_storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class triton_memory( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'triton_memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - triton_readiness_initial_delay_seconds = schemas.IntSchema - __annotations__ = { - "repository": repository, - "tag": tag, - "command": command, - "protocol": protocol, - "flavor": flavor, - "triton_model_repository": triton_model_repository, - "triton_num_cpu": triton_num_cpu, - "triton_commit_tag": triton_commit_tag, - "predict_route": predict_route, - "healthcheck_route": healthcheck_route, - "env": env, - "readiness_initial_delay_seconds": readiness_initial_delay_seconds, - "extra_routes": extra_routes, - "routes": routes, - "forwarder_type": forwarder_type, - "worker_command": worker_command, - "worker_env": worker_env, - "triton_model_replicas": triton_model_replicas, - "triton_storage": triton_storage, - "triton_memory": triton_memory, - "triton_readiness_initial_delay_seconds": triton_readiness_initial_delay_seconds, - } - - flavor: MetaOapg.properties.flavor - protocol: MetaOapg.properties.protocol - tag: MetaOapg.properties.tag - repository: MetaOapg.properties.repository - triton_commit_tag: MetaOapg.properties.triton_commit_tag - triton_model_repository: MetaOapg.properties.triton_model_repository - command: MetaOapg.properties.command - triton_num_cpu: MetaOapg.properties.triton_num_cpu - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_model_repository"]) -> MetaOapg.properties.triton_model_repository: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_num_cpu"]) -> MetaOapg.properties.triton_num_cpu: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_commit_tag"]) -> MetaOapg.properties.triton_commit_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["healthcheck_route"]) -> MetaOapg.properties.healthcheck_route: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> MetaOapg.properties.readiness_initial_delay_seconds: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["extra_routes"]) -> MetaOapg.properties.extra_routes: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["routes"]) -> MetaOapg.properties.routes: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["forwarder_type"]) -> MetaOapg.properties.forwarder_type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["worker_command"]) -> MetaOapg.properties.worker_command: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["worker_env"]) -> MetaOapg.properties.worker_env: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_model_replicas"]) -> MetaOapg.properties.triton_model_replicas: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_storage"]) -> MetaOapg.properties.triton_storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_memory"]) -> MetaOapg.properties.triton_memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_readiness_initial_delay_seconds"]) -> MetaOapg.properties.triton_readiness_initial_delay_seconds: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "command", "protocol", "flavor", "triton_model_repository", "triton_num_cpu", "triton_commit_tag", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", "triton_model_replicas", "triton_storage", "triton_memory", "triton_readiness_initial_delay_seconds", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_model_repository"]) -> MetaOapg.properties.triton_model_repository: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_num_cpu"]) -> MetaOapg.properties.triton_num_cpu: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_commit_tag"]) -> MetaOapg.properties.triton_commit_tag: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["predict_route"]) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["healthcheck_route"]) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["extra_routes"]) -> typing.Union[MetaOapg.properties.extra_routes, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["routes"]) -> typing.Union[MetaOapg.properties.routes, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["forwarder_type"]) -> typing.Union[MetaOapg.properties.forwarder_type, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["worker_command"]) -> typing.Union[MetaOapg.properties.worker_command, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["worker_env"]) -> typing.Union[MetaOapg.properties.worker_env, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_model_replicas"]) -> typing.Union[MetaOapg.properties.triton_model_replicas, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_storage"]) -> typing.Union[MetaOapg.properties.triton_storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_memory"]) -> typing.Union[MetaOapg.properties.triton_memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_readiness_initial_delay_seconds"]) -> typing.Union[MetaOapg.properties.triton_readiness_initial_delay_seconds, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "command", "protocol", "flavor", "triton_model_repository", "triton_num_cpu", "triton_commit_tag", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", "triton_model_replicas", "triton_storage", "triton_memory", "triton_readiness_initial_delay_seconds", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - flavor: typing.Union[MetaOapg.properties.flavor, str, ], - protocol: typing.Union[MetaOapg.properties.protocol, str, ], - tag: typing.Union[MetaOapg.properties.tag, str, ], - repository: typing.Union[MetaOapg.properties.repository, str, ], - triton_commit_tag: typing.Union[MetaOapg.properties.triton_commit_tag, str, ], - triton_model_repository: typing.Union[MetaOapg.properties.triton_model_repository, str, ], - command: typing.Union[MetaOapg.properties.command, list, tuple, ], - triton_num_cpu: typing.Union[MetaOapg.properties.triton_num_cpu, 
decimal.Decimal, int, float, ], - predict_route: typing.Union[MetaOapg.properties.predict_route, str, schemas.Unset] = schemas.unset, - healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - readiness_initial_delay_seconds: typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset] = schemas.unset, - extra_routes: typing.Union[MetaOapg.properties.extra_routes, list, tuple, schemas.Unset] = schemas.unset, - routes: typing.Union[MetaOapg.properties.routes, list, tuple, schemas.Unset] = schemas.unset, - forwarder_type: typing.Union[MetaOapg.properties.forwarder_type, None, str, schemas.Unset] = schemas.unset, - worker_command: typing.Union[MetaOapg.properties.worker_command, list, tuple, None, schemas.Unset] = schemas.unset, - worker_env: typing.Union[MetaOapg.properties.worker_env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - triton_model_replicas: typing.Union[MetaOapg.properties.triton_model_replicas, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - triton_storage: typing.Union[MetaOapg.properties.triton_storage, None, str, schemas.Unset] = schemas.unset, - triton_memory: typing.Union[MetaOapg.properties.triton_memory, None, str, schemas.Unset] = schemas.unset, - triton_readiness_initial_delay_seconds: typing.Union[MetaOapg.properties.triton_readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'TritonEnhancedRunnableImageFlavor': - return super().__new__( - cls, - *_args, - flavor=flavor, - protocol=protocol, - tag=tag, - repository=repository, - 
triton_commit_tag=triton_commit_tag, - triton_model_repository=triton_model_repository, - command=command, - triton_num_cpu=triton_num_cpu, - predict_route=predict_route, - healthcheck_route=healthcheck_route, - env=env, - readiness_initial_delay_seconds=readiness_initial_delay_seconds, - extra_routes=extra_routes, - routes=routes, - forwarder_type=forwarder_type, - worker_command=worker_command, - worker_env=worker_env, - triton_model_replicas=triton_model_replicas, - triton_storage=triton_storage, - triton_memory=triton_memory, - triton_readiness_initial_delay_seconds=triton_readiness_initial_delay_seconds, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_batch_completions_v2_request.py b/launch/api_client/model/update_batch_completions_v2_request.py deleted file mode 100644 index 033d152f..00000000 --- a/launch/api_client/model/update_batch_completions_v2_request.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateBatchCompletionsV2Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "job_id", - } - - class properties: - job_id = schemas.StrSchema - - - class priority( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "job_id": job_id, - "priority": priority, - } - - job_id: MetaOapg.properties.job_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", "priority", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", "priority", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - job_id: typing.Union[MetaOapg.properties.job_id, str, ], - priority: typing.Union[MetaOapg.properties.priority, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateBatchCompletionsV2Request': - return super().__new__( - cls, - *_args, - job_id=job_id, - priority=priority, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_batch_completions_v2_response.py b/launch/api_client/model/update_batch_completions_v2_response.py deleted file mode 100644 index 8ff3fab3..00000000 --- a/launch/api_client/model/update_batch_completions_v2_response.py +++ /dev/null @@ -1,301 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateBatchCompletionsV2Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "completed_at", - "metadata", - "expires_at", - "model_config", - "job_id", - "success", - "created_at", - "output_data_path", - "status", - } - - class properties: - job_id = schemas.StrSchema - output_data_path = schemas.StrSchema - - @staticmethod - def model_config() -> typing.Type['BatchCompletionsModelConfig']: - return BatchCompletionsModelConfig - - @staticmethod - def status() -> typing.Type['BatchCompletionsJobStatus']: - return BatchCompletionsJobStatus - created_at = schemas.StrSchema - expires_at = schemas.StrSchema - - - class completed_at( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'completed_at': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - success = schemas.BoolSchema - - - class input_data_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 
'input_data_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class priority( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "job_id": job_id, - "output_data_path": output_data_path, - "model_config": model_config, - "status": status, - "created_at": created_at, - "expires_at": expires_at, - "completed_at": completed_at, - "metadata": metadata, - "success": success, - "input_data_path": input_data_path, - "priority": priority, - } - - completed_at: MetaOapg.properties.completed_at - metadata: MetaOapg.properties.metadata - expires_at: MetaOapg.properties.expires_at - model_config: 'BatchCompletionsModelConfig' - job_id: MetaOapg.properties.job_id - success: MetaOapg.properties.success - created_at: MetaOapg.properties.created_at - output_data_path: MetaOapg.properties.output_data_path - status: 'BatchCompletionsJobStatus' - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchCompletionsJobStatus': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["input_data_path"]) -> MetaOapg.properties.input_data_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", "output_data_path", "model_config", "status", "created_at", "expires_at", "completed_at", "metadata", "success", "input_data_path", "priority", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchCompletionsJobStatus': ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["input_data_path"]) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", "output_data_path", "model_config", "status", "created_at", "expires_at", "completed_at", "metadata", "success", "input_data_path", "priority", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - completed_at: typing.Union[MetaOapg.properties.completed_at, None, str, ], - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, ], - expires_at: typing.Union[MetaOapg.properties.expires_at, str, ], - model_config: 'BatchCompletionsModelConfig', - job_id: typing.Union[MetaOapg.properties.job_id, str, ], - success: typing.Union[MetaOapg.properties.success, bool, ], - created_at: typing.Union[MetaOapg.properties.created_at, str, ], - output_data_path: typing.Union[MetaOapg.properties.output_data_path, str, ], - status: 'BatchCompletionsJobStatus', - input_data_path: typing.Union[MetaOapg.properties.input_data_path, None, str, schemas.Unset] = schemas.unset, - priority: typing.Union[MetaOapg.properties.priority, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateBatchCompletionsV2Response': - return super().__new__( - cls, - *_args, - completed_at=completed_at, - metadata=metadata, - expires_at=expires_at, - model_config=model_config, - job_id=job_id, - success=success, - created_at=created_at, - output_data_path=output_data_path, - status=status, - input_data_path=input_data_path, - priority=priority, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.batch_completions_job_status import ( - BatchCompletionsJobStatus, -) -from launch.api_client.model.batch_completions_model_config import ( - BatchCompletionsModelConfig, -) diff --git a/launch/api_client/model/update_batch_job_v1_request.py b/launch/api_client/model/update_batch_job_v1_request.py deleted file mode 100644 index 963401fd..00000000 --- a/launch/api_client/model/update_batch_job_v1_request.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateBatchJobV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "cancel", - } - - class properties: - cancel = schemas.BoolSchema - __annotations__ = { - "cancel": cancel, - } - - cancel: MetaOapg.properties.cancel - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["cancel", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cancel", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - cancel: typing.Union[MetaOapg.properties.cancel, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateBatchJobV1Request': - return super().__new__( - cls, - *_args, - cancel=cancel, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_batch_job_v1_response.py b/launch/api_client/model/update_batch_job_v1_response.py deleted file mode 100644 index 9ccfc677..00000000 --- a/launch/api_client/model/update_batch_job_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import 
decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateBatchJobV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - success: typing.Union[MetaOapg.properties.success, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateBatchJobV1Response': - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_deep_speed_model_endpoint_request.py b/launch/api_client/model/update_deep_speed_model_endpoint_request.py deleted file mode 100644 index 6c1902e6..00000000 --- a/launch/api_client/model/update_deep_speed_model_endpoint_request.py +++ /dev/null @@ -1,952 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateDeepSpeedModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "deepspeed": "DEEPSPEED", - } - - @schemas.classproperty - def DEEPSPEED(cls): - return cls("deepspeed") - - - class inference_framework_image_tag( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'inference_framework_image_tag': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_shards( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'num_shards': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class force_bundle_recreation( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'force_bundle_recreation': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, 
decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class labels( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - "chat_template_override": chat_template_override, - "enable_startup_metrics": enable_startup_metrics, - "model_name": 
model_name, - "source": source, - "inference_framework": inference_framework, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "metadata": metadata, - "force_bundle_recreation": force_bundle_recreation, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> MetaOapg.properties.force_bundle_recreation: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> typing.Union[MetaOapg.properties.force_bundle_recreation, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - model_name: typing.Union[MetaOapg.properties.model_name, None, str, schemas.Unset] = schemas.unset, - source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, - inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - 
force_bundle_recreation: typing.Union[MetaOapg.properties.force_bundle_recreation, None, bool, schemas.Unset] = schemas.unset, - min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateDeepSpeedModelEndpointRequest': - return super().__new__( - cls, - *_args, - quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - model_name=model_name, - source=source, - inference_framework=inference_framework, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - metadata=metadata, - force_bundle_recreation=force_bundle_recreation, - min_workers=min_workers, - max_workers=max_workers, - per_worker=per_worker, - labels=labels, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType 
-from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/update_docker_image_batch_job_v1_request.py b/launch/api_client/model/update_docker_image_batch_job_v1_request.py deleted file mode 100644 index 4cbb5548..00000000 --- a/launch/api_client/model/update_docker_image_batch_job_v1_request.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateDockerImageBatchJobV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "cancel", - } - - class properties: - cancel = schemas.BoolSchema - __annotations__ = { - "cancel": cancel, - } - - cancel: MetaOapg.properties.cancel - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["cancel", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... 
- - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cancel", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - cancel: typing.Union[MetaOapg.properties.cancel, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateDockerImageBatchJobV1Request': - return super().__new__( - cls, - *_args, - cancel=cancel, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_docker_image_batch_job_v1_response.py b/launch/api_client/model/update_docker_image_batch_job_v1_response.py deleted file mode 100644 index ebe4960d..00000000 --- a/launch/api_client/model/update_docker_image_batch_job_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateDockerImageBatchJobV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - success: typing.Union[MetaOapg.properties.success, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateDockerImageBatchJobV1Response': - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_llm_model_endpoint_v1_request.py b/launch/api_client/model/update_llm_model_endpoint_v1_request.py deleted file mode 100644 index d4f00914..00000000 --- a/launch/api_client/model/update_llm_model_endpoint_v1_request.py +++ /dev/null @@ -1,80 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated 
by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateLLMModelEndpointV1Request( - schemas.ComposedSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - UpdateVLLMModelEndpointRequest, - UpdateSGLangModelEndpointRequest, - UpdateDeepSpeedModelEndpointRequest, - UpdateTextGenerationInferenceModelEndpointRequest, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateLLMModelEndpointV1Request': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.update_deep_speed_model_endpoint_request import ( - UpdateDeepSpeedModelEndpointRequest, -) -from 
launch.api_client.model.update_sg_lang_model_endpoint_request import ( - UpdateSGLangModelEndpointRequest, -) -from launch.api_client.model.update_text_generation_inference_model_endpoint_request import ( - UpdateTextGenerationInferenceModelEndpointRequest, -) -from launch.api_client.model.update_vllm_model_endpoint_request import ( - UpdateVLLMModelEndpointRequest, -) diff --git a/launch/api_client/model/update_llm_model_endpoint_v1_response.py b/launch/api_client/model/update_llm_model_endpoint_v1_response.py deleted file mode 100644 index a7ab1635..00000000 --- a/launch/api_client/model/update_llm_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateLLMModelEndpointV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "endpoint_creation_task_id", - } - - class properties: - endpoint_creation_task_id = schemas.StrSchema - __annotations__ = { - "endpoint_creation_task_id": endpoint_creation_task_id, - } - - endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... 
- - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - endpoint_creation_task_id: typing.Union[MetaOapg.properties.endpoint_creation_task_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateLLMModelEndpointV1Response': - return super().__new__( - cls, - *_args, - endpoint_creation_task_id=endpoint_creation_task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_model_endpoint_v1_request.py b/launch/api_client/model/update_model_endpoint_v1_request.py deleted file mode 100644 index a62beca6..00000000 --- a/launch/api_client/model/update_model_endpoint_v1_request.py +++ /dev/null @@ -1,737 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # 
noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateModelEndpointV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - class properties: - - - class model_bundle_id( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_bundle_id': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def 
any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - class MetaOapg: - inclusive_minimum = 0 - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, 
- ) - - - class per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class concurrent_requests_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'concurrent_requests_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class labels( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): 
- - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - 
__annotations__ = { - "model_bundle_id": model_bundle_id, - "metadata": metadata, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "optimize_costs": optimize_costs, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "concurrent_requests_per_worker": concurrent_requests_per_worker, - "labels": labels, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", "metadata", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "optimize_costs", "min_workers", "max_workers", "per_worker", "concurrent_requests_per_worker", "labels", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> typing.Union[MetaOapg.properties.model_bundle_id, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> typing.Union[MetaOapg.properties.concurrent_requests_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", "metadata", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "optimize_costs", "min_workers", "max_workers", "per_worker", "concurrent_requests_per_worker", "labels", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, None, str, schemas.Unset] = schemas.unset, - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - 
max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - concurrent_requests_per_worker: typing.Union[MetaOapg.properties.concurrent_requests_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateModelEndpointV1Request': - return super().__new__( - cls, - *_args, - model_bundle_id=model_bundle_id, - metadata=metadata, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - optimize_costs=optimize_costs, - min_workers=min_workers, - max_workers=max_workers, - per_worker=per_worker, - concurrent_requests_per_worker=concurrent_requests_per_worker, - labels=labels, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - 
default_callback_auth=default_callback_auth, - public_inference=public_inference, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/update_model_endpoint_v1_response.py b/launch/api_client/model/update_model_endpoint_v1_response.py deleted file mode 100644 index 0c5ff98f..00000000 --- a/launch/api_client/model/update_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateModelEndpointV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "endpoint_creation_task_id", - } - - class properties: - endpoint_creation_task_id = schemas.StrSchema - __annotations__ = { - "endpoint_creation_task_id": endpoint_creation_task_id, - } - - endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - endpoint_creation_task_id: typing.Union[MetaOapg.properties.endpoint_creation_task_id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateModelEndpointV1Response': - return super().__new__( - cls, - *_args, - endpoint_creation_task_id=endpoint_creation_task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_sg_lang_model_endpoint_request.py b/launch/api_client/model/update_sg_lang_model_endpoint_request.py deleted file mode 100644 index 80834a9c..00000000 --- a/launch/api_client/model/update_sg_lang_model_endpoint_request.py +++ /dev/null @@ -1,3512 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict 
# noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateSGLangModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - class properties: - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "sglang": "SGLANG", - } - - @schemas.classproperty - def SGLANG(cls): - return cls("sglang") - - - class inference_framework_image_tag( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'inference_framework_image_tag': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_shards( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'num_shards': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class force_bundle_recreation( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'force_bundle_recreation': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, 
decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class labels( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class trust_remote_code( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'trust_remote_code': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tp_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tp_size': - return super().__new__( - cls, 
- *_args, - _configuration=_configuration, - ) - - - class skip_tokenizer_init( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_tokenizer_init': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class load_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'load_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class kv_cache_dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'kv_cache_dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization_param_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization_param_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'quantization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class context_length( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'context_length': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class device( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'device': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class served_model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'served_model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class is_embedding( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'is_embedding': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - 
- def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class mem_fraction_static( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'mem_fraction_static': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_running_requests( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_running_requests': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_total_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_total_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chunked_prefill_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chunked_prefill_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_prefill_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = 
None, - ) -> 'max_prefill_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class schedule_policy( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'schedule_policy': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class schedule_conservativeness( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'schedule_conservativeness': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpu_offload_gb( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cpu_offload_gb': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prefill_only_one_req( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prefill_only_one_req': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class stream_interval( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'stream_interval': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class random_seed( - 
schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'random_seed': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class constrained_json_whitespace_pattern( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'constrained_json_whitespace_pattern': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class watchdog_timeout( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'watchdog_timeout': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class download_dir( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'download_dir': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class base_gpu_id( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'base_gpu_id': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class log_level( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'log_level': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class log_level_http( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'log_level_http': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class log_requests( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'log_requests': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class show_time_cost( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'show_time_cost': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class decode_log_interval( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'decode_log_interval': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class api_key( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'api_key': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class file_storage_pth( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'file_storage_pth': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_cache_report( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_cache_report': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class data_parallel_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'data_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class load_balance_method( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'load_balance_method': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class expert_parallel_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'expert_parallel_size': - return 
super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class dist_init_addr( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'dist_init_addr': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class nnodes( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nnodes': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class node_rank( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'node_rank': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class json_model_override_args( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'json_model_override_args': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class lora_paths( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'lora_paths': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_loras_per_batch( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_loras_per_batch': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class attention_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'attention_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class sampling_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'sampling_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class grammar_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'grammar_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_algorithm( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_algorithm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_draft_model_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_draft_model_path': - return 
super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_num_steps( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_num_steps': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_num_draft_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_num_draft_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class speculative_eagle_topk( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'speculative_eagle_topk': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_double_sparsity( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_double_sparsity': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_channel_config_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_channel_config_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_heavy_channel_num( - 
schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_heavy_channel_num': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_heavy_token_num( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_heavy_token_num': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_heavy_channel_type( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_heavy_channel_type': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class ds_sparse_decode_threshold( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ds_sparse_decode_threshold': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_radix_cache( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_radix_cache': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_jump_forward( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: 
typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_jump_forward': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_cuda_graph( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_cuda_graph': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_cuda_graph_padding( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_cuda_graph_padding': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_outlines_disk_cache( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_outlines_disk_cache': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_custom_all_reduce( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_custom_all_reduce': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_mla( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_mla': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class disable_overlap_schedule( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_overlap_schedule': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_mixed_chunk( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_mixed_chunk': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_dp_attention( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_dp_attention': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_ep_moe( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_ep_moe': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_torch_compile( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_torch_compile': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class torch_compile_max_bs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: 
typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'torch_compile_max_bs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cuda_graph_max_bs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cuda_graph_max_bs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cuda_graph_bs( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.IntSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cuda_graph_bs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class torchao_config( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'torchao_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_nan_detection( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_nan_detection': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_p2p_check( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_p2p_check': - return 
super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class triton_attention_reduce_in_fp32( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'triton_attention_reduce_in_fp32': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class triton_attention_num_kv_splits( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'triton_attention_num_kv_splits': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_continuous_decode_steps( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_continuous_decode_steps': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class delete_ckpt_after_loading( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'delete_ckpt_after_loading': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_memory_saver( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_memory_saver': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class 
allow_auto_truncate( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'allow_auto_truncate': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_custom_logit_processor( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_custom_logit_processor': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tool_call_parser( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tool_call_parser': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class huggingface_repo( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'huggingface_repo': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - "chat_template_override": 
chat_template_override, - "enable_startup_metrics": enable_startup_metrics, - "model_name": model_name, - "source": source, - "inference_framework": inference_framework, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "metadata": metadata, - "force_bundle_recreation": force_bundle_recreation, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - "trust_remote_code": trust_remote_code, - "tp_size": tp_size, - "skip_tokenizer_init": skip_tokenizer_init, - "load_format": load_format, - "dtype": dtype, - "kv_cache_dtype": kv_cache_dtype, - "quantization_param_path": quantization_param_path, - "quantization": quantization, - "context_length": context_length, - "device": device, - "served_model_name": served_model_name, - "chat_template": chat_template, - "is_embedding": is_embedding, - "revision": revision, - "mem_fraction_static": mem_fraction_static, - "max_running_requests": max_running_requests, - "max_total_tokens": max_total_tokens, - "chunked_prefill_size": chunked_prefill_size, - "max_prefill_tokens": max_prefill_tokens, - "schedule_policy": schedule_policy, - "schedule_conservativeness": schedule_conservativeness, - "cpu_offload_gb": cpu_offload_gb, - "prefill_only_one_req": prefill_only_one_req, - "stream_interval": stream_interval, - "random_seed": random_seed, - "constrained_json_whitespace_pattern": constrained_json_whitespace_pattern, - "watchdog_timeout": watchdog_timeout, - "download_dir": download_dir, - "base_gpu_id": base_gpu_id, - "log_level": log_level, - "log_level_http": log_level_http, - "log_requests": log_requests, - "show_time_cost": show_time_cost, - "enable_metrics": enable_metrics, - "decode_log_interval": decode_log_interval, - "api_key": api_key, - "file_storage_pth": file_storage_pth, - "enable_cache_report": enable_cache_report, - "data_parallel_size": data_parallel_size, - "load_balance_method": load_balance_method, - 
"expert_parallel_size": expert_parallel_size, - "dist_init_addr": dist_init_addr, - "nnodes": nnodes, - "node_rank": node_rank, - "json_model_override_args": json_model_override_args, - "lora_paths": lora_paths, - "max_loras_per_batch": max_loras_per_batch, - "attention_backend": attention_backend, - "sampling_backend": sampling_backend, - "grammar_backend": grammar_backend, - "speculative_algorithm": speculative_algorithm, - "speculative_draft_model_path": speculative_draft_model_path, - "speculative_num_steps": speculative_num_steps, - "speculative_num_draft_tokens": speculative_num_draft_tokens, - "speculative_eagle_topk": speculative_eagle_topk, - "enable_double_sparsity": enable_double_sparsity, - "ds_channel_config_path": ds_channel_config_path, - "ds_heavy_channel_num": ds_heavy_channel_num, - "ds_heavy_token_num": ds_heavy_token_num, - "ds_heavy_channel_type": ds_heavy_channel_type, - "ds_sparse_decode_threshold": ds_sparse_decode_threshold, - "disable_radix_cache": disable_radix_cache, - "disable_jump_forward": disable_jump_forward, - "disable_cuda_graph": disable_cuda_graph, - "disable_cuda_graph_padding": disable_cuda_graph_padding, - "disable_outlines_disk_cache": disable_outlines_disk_cache, - "disable_custom_all_reduce": disable_custom_all_reduce, - "disable_mla": disable_mla, - "disable_overlap_schedule": disable_overlap_schedule, - "enable_mixed_chunk": enable_mixed_chunk, - "enable_dp_attention": enable_dp_attention, - "enable_ep_moe": enable_ep_moe, - "enable_torch_compile": enable_torch_compile, - "torch_compile_max_bs": torch_compile_max_bs, - "cuda_graph_max_bs": cuda_graph_max_bs, - "cuda_graph_bs": cuda_graph_bs, - "torchao_config": torchao_config, - "enable_nan_detection": enable_nan_detection, - "enable_p2p_check": enable_p2p_check, - "triton_attention_reduce_in_fp32": triton_attention_reduce_in_fp32, - "triton_attention_num_kv_splits": triton_attention_num_kv_splits, - "num_continuous_decode_steps": num_continuous_decode_steps, - 
"delete_ckpt_after_loading": delete_ckpt_after_loading, - "enable_memory_saver": enable_memory_saver, - "allow_auto_truncate": allow_auto_truncate, - "enable_custom_logit_processor": enable_custom_logit_processor, - "tool_call_parser": tool_call_parser, - "huggingface_repo": huggingface_repo, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> MetaOapg.properties.force_bundle_recreation: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tp_size"]) -> MetaOapg.properties.tp_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["kv_cache_dtype"]) -> MetaOapg.properties.kv_cache_dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["context_length"]) -> MetaOapg.properties.context_length: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["device"]) -> MetaOapg.properties.device: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["is_embedding"]) -> MetaOapg.properties.is_embedding: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mem_fraction_static"]) -> MetaOapg.properties.mem_fraction_static: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_running_requests"]) -> MetaOapg.properties.max_running_requests: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_total_tokens"]) -> MetaOapg.properties.max_total_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chunked_prefill_size"]) -> MetaOapg.properties.chunked_prefill_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_prefill_tokens"]) -> MetaOapg.properties.max_prefill_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schedule_policy"]) -> MetaOapg.properties.schedule_policy: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schedule_conservativeness"]) -> MetaOapg.properties.schedule_conservativeness: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpu_offload_gb"]) -> MetaOapg.properties.cpu_offload_gb: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prefill_only_one_req"]) -> MetaOapg.properties.prefill_only_one_req: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stream_interval"]) -> MetaOapg.properties.stream_interval: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["random_seed"]) -> MetaOapg.properties.random_seed: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["constrained_json_whitespace_pattern"]) -> MetaOapg.properties.constrained_json_whitespace_pattern: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["watchdog_timeout"]) -> MetaOapg.properties.watchdog_timeout: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["download_dir"]) -> MetaOapg.properties.download_dir: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["base_gpu_id"]) -> MetaOapg.properties.base_gpu_id: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["log_level"]) -> MetaOapg.properties.log_level: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["log_level_http"]) -> MetaOapg.properties.log_level_http: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["log_requests"]) -> MetaOapg.properties.log_requests: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["show_time_cost"]) -> MetaOapg.properties.show_time_cost: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_metrics"]) -> MetaOapg.properties.enable_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["decode_log_interval"]) -> MetaOapg.properties.decode_log_interval: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["api_key"]) -> MetaOapg.properties.api_key: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["file_storage_pth"]) -> MetaOapg.properties.file_storage_pth: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_cache_report"]) -> MetaOapg.properties.enable_cache_report: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["data_parallel_size"]) -> MetaOapg.properties.data_parallel_size: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_balance_method"]) -> MetaOapg.properties.load_balance_method: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["expert_parallel_size"]) -> MetaOapg.properties.expert_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["dist_init_addr"]) -> MetaOapg.properties.dist_init_addr: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nnodes"]) -> MetaOapg.properties.nnodes: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["node_rank"]) -> MetaOapg.properties.node_rank: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["json_model_override_args"]) -> MetaOapg.properties.json_model_override_args: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["lora_paths"]) -> MetaOapg.properties.lora_paths: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_loras_per_batch"]) -> MetaOapg.properties.max_loras_per_batch: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["attention_backend"]) -> MetaOapg.properties.attention_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["sampling_backend"]) -> MetaOapg.properties.sampling_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["grammar_backend"]) -> MetaOapg.properties.grammar_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_algorithm"]) -> MetaOapg.properties.speculative_algorithm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_draft_model_path"]) -> MetaOapg.properties.speculative_draft_model_path: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_num_steps"]) -> MetaOapg.properties.speculative_num_steps: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_num_draft_tokens"]) -> MetaOapg.properties.speculative_num_draft_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["speculative_eagle_topk"]) -> MetaOapg.properties.speculative_eagle_topk: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_double_sparsity"]) -> MetaOapg.properties.enable_double_sparsity: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_channel_config_path"]) -> MetaOapg.properties.ds_channel_config_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_heavy_channel_num"]) -> MetaOapg.properties.ds_heavy_channel_num: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_heavy_token_num"]) -> MetaOapg.properties.ds_heavy_token_num: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_heavy_channel_type"]) -> MetaOapg.properties.ds_heavy_channel_type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ds_sparse_decode_threshold"]) -> MetaOapg.properties.ds_sparse_decode_threshold: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_radix_cache"]) -> MetaOapg.properties.disable_radix_cache: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_jump_forward"]) -> MetaOapg.properties.disable_jump_forward: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_cuda_graph"]) -> MetaOapg.properties.disable_cuda_graph: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_cuda_graph_padding"]) -> MetaOapg.properties.disable_cuda_graph_padding: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_outlines_disk_cache"]) -> MetaOapg.properties.disable_outlines_disk_cache: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_custom_all_reduce"]) -> MetaOapg.properties.disable_custom_all_reduce: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_mla"]) -> MetaOapg.properties.disable_mla: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_overlap_schedule"]) -> MetaOapg.properties.disable_overlap_schedule: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_mixed_chunk"]) -> MetaOapg.properties.enable_mixed_chunk: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_dp_attention"]) -> MetaOapg.properties.enable_dp_attention: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_ep_moe"]) -> MetaOapg.properties.enable_ep_moe: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_torch_compile"]) -> MetaOapg.properties.enable_torch_compile: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["torch_compile_max_bs"]) -> MetaOapg.properties.torch_compile_max_bs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cuda_graph_max_bs"]) -> MetaOapg.properties.cuda_graph_max_bs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cuda_graph_bs"]) -> MetaOapg.properties.cuda_graph_bs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["torchao_config"]) -> MetaOapg.properties.torchao_config: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_nan_detection"]) -> MetaOapg.properties.enable_nan_detection: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_p2p_check"]) -> MetaOapg.properties.enable_p2p_check: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_attention_reduce_in_fp32"]) -> MetaOapg.properties.triton_attention_reduce_in_fp32: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_attention_num_kv_splits"]) -> MetaOapg.properties.triton_attention_num_kv_splits: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_continuous_decode_steps"]) -> MetaOapg.properties.num_continuous_decode_steps: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["delete_ckpt_after_loading"]) -> MetaOapg.properties.delete_ckpt_after_loading: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_memory_saver"]) -> MetaOapg.properties.enable_memory_saver: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["allow_auto_truncate"]) -> MetaOapg.properties.allow_auto_truncate: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_custom_logit_processor"]) -> MetaOapg.properties.enable_custom_logit_processor: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["huggingface_repo"]) -> MetaOapg.properties.huggingface_repo: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", "trust_remote_code", "tp_size", "skip_tokenizer_init", "load_format", "dtype", "kv_cache_dtype", "quantization_param_path", "quantization", "context_length", "device", "served_model_name", "chat_template", "is_embedding", "revision", "mem_fraction_static", "max_running_requests", "max_total_tokens", "chunked_prefill_size", "max_prefill_tokens", "schedule_policy", "schedule_conservativeness", "cpu_offload_gb", "prefill_only_one_req", "stream_interval", "random_seed", "constrained_json_whitespace_pattern", "watchdog_timeout", "download_dir", "base_gpu_id", "log_level", "log_level_http", "log_requests", "show_time_cost", "enable_metrics", "decode_log_interval", "api_key", "file_storage_pth", "enable_cache_report", "data_parallel_size", "load_balance_method", "expert_parallel_size", "dist_init_addr", "nnodes", "node_rank", "json_model_override_args", "lora_paths", "max_loras_per_batch", "attention_backend", "sampling_backend", "grammar_backend", "speculative_algorithm", "speculative_draft_model_path", "speculative_num_steps", "speculative_num_draft_tokens", "speculative_eagle_topk", "enable_double_sparsity", "ds_channel_config_path", "ds_heavy_channel_num", "ds_heavy_token_num", "ds_heavy_channel_type", "ds_sparse_decode_threshold", "disable_radix_cache", "disable_jump_forward", "disable_cuda_graph", "disable_cuda_graph_padding", "disable_outlines_disk_cache", "disable_custom_all_reduce", "disable_mla", 
"disable_overlap_schedule", "enable_mixed_chunk", "enable_dp_attention", "enable_ep_moe", "enable_torch_compile", "torch_compile_max_bs", "cuda_graph_max_bs", "cuda_graph_bs", "torchao_config", "enable_nan_detection", "enable_p2p_check", "triton_attention_reduce_in_fp32", "triton_attention_num_kv_splits", "num_continuous_decode_steps", "delete_ckpt_after_loading", "enable_memory_saver", "allow_auto_truncate", "enable_custom_logit_processor", "tool_call_parser", "huggingface_repo", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> typing.Union[MetaOapg.properties.force_bundle_recreation, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tp_size"]) -> typing.Union[MetaOapg.properties.tp_size, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["kv_cache_dtype"]) -> typing.Union[MetaOapg.properties.kv_cache_dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["context_length"]) -> typing.Union[MetaOapg.properties.context_length, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["device"]) -> typing.Union[MetaOapg.properties.device, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["is_embedding"]) -> typing.Union[MetaOapg.properties.is_embedding, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["mem_fraction_static"]) -> typing.Union[MetaOapg.properties.mem_fraction_static, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_running_requests"]) -> typing.Union[MetaOapg.properties.max_running_requests, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_total_tokens"]) -> typing.Union[MetaOapg.properties.max_total_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chunked_prefill_size"]) -> typing.Union[MetaOapg.properties.chunked_prefill_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_prefill_tokens"]) -> typing.Union[MetaOapg.properties.max_prefill_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schedule_policy"]) -> typing.Union[MetaOapg.properties.schedule_policy, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schedule_conservativeness"]) -> typing.Union[MetaOapg.properties.schedule_conservativeness, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpu_offload_gb"]) -> typing.Union[MetaOapg.properties.cpu_offload_gb, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prefill_only_one_req"]) -> typing.Union[MetaOapg.properties.prefill_only_one_req, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["stream_interval"]) -> typing.Union[MetaOapg.properties.stream_interval, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["random_seed"]) -> typing.Union[MetaOapg.properties.random_seed, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["constrained_json_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.constrained_json_whitespace_pattern, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["watchdog_timeout"]) -> typing.Union[MetaOapg.properties.watchdog_timeout, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["download_dir"]) -> typing.Union[MetaOapg.properties.download_dir, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["base_gpu_id"]) -> typing.Union[MetaOapg.properties.base_gpu_id, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["log_level"]) -> typing.Union[MetaOapg.properties.log_level, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["log_level_http"]) -> typing.Union[MetaOapg.properties.log_level_http, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["log_requests"]) -> typing.Union[MetaOapg.properties.log_requests, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["show_time_cost"]) -> typing.Union[MetaOapg.properties.show_time_cost, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_metrics"]) -> typing.Union[MetaOapg.properties.enable_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["decode_log_interval"]) -> typing.Union[MetaOapg.properties.decode_log_interval, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["api_key"]) -> typing.Union[MetaOapg.properties.api_key, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["file_storage_pth"]) -> typing.Union[MetaOapg.properties.file_storage_pth, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_cache_report"]) -> typing.Union[MetaOapg.properties.enable_cache_report, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["data_parallel_size"]) -> typing.Union[MetaOapg.properties.data_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_balance_method"]) -> typing.Union[MetaOapg.properties.load_balance_method, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["expert_parallel_size"]) -> typing.Union[MetaOapg.properties.expert_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["dist_init_addr"]) -> typing.Union[MetaOapg.properties.dist_init_addr, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nnodes"]) -> typing.Union[MetaOapg.properties.nnodes, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["node_rank"]) -> typing.Union[MetaOapg.properties.node_rank, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["json_model_override_args"]) -> typing.Union[MetaOapg.properties.json_model_override_args, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["lora_paths"]) -> typing.Union[MetaOapg.properties.lora_paths, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_loras_per_batch"]) -> typing.Union[MetaOapg.properties.max_loras_per_batch, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["attention_backend"]) -> typing.Union[MetaOapg.properties.attention_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["sampling_backend"]) -> typing.Union[MetaOapg.properties.sampling_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["grammar_backend"]) -> typing.Union[MetaOapg.properties.grammar_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_algorithm"]) -> typing.Union[MetaOapg.properties.speculative_algorithm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_draft_model_path"]) -> typing.Union[MetaOapg.properties.speculative_draft_model_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_num_steps"]) -> typing.Union[MetaOapg.properties.speculative_num_steps, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_num_draft_tokens"]) -> typing.Union[MetaOapg.properties.speculative_num_draft_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["speculative_eagle_topk"]) -> typing.Union[MetaOapg.properties.speculative_eagle_topk, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_double_sparsity"]) -> typing.Union[MetaOapg.properties.enable_double_sparsity, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_channel_config_path"]) -> typing.Union[MetaOapg.properties.ds_channel_config_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_channel_num"]) -> typing.Union[MetaOapg.properties.ds_heavy_channel_num, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_token_num"]) -> typing.Union[MetaOapg.properties.ds_heavy_token_num, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_channel_type"]) -> typing.Union[MetaOapg.properties.ds_heavy_channel_type, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["ds_sparse_decode_threshold"]) -> typing.Union[MetaOapg.properties.ds_sparse_decode_threshold, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_radix_cache"]) -> typing.Union[MetaOapg.properties.disable_radix_cache, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_jump_forward"]) -> typing.Union[MetaOapg.properties.disable_jump_forward, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_cuda_graph"]) -> typing.Union[MetaOapg.properties.disable_cuda_graph, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_cuda_graph_padding"]) -> typing.Union[MetaOapg.properties.disable_cuda_graph_padding, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_outlines_disk_cache"]) -> typing.Union[MetaOapg.properties.disable_outlines_disk_cache, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_custom_all_reduce"]) -> typing.Union[MetaOapg.properties.disable_custom_all_reduce, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_mla"]) -> typing.Union[MetaOapg.properties.disable_mla, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_overlap_schedule"]) -> typing.Union[MetaOapg.properties.disable_overlap_schedule, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_mixed_chunk"]) -> typing.Union[MetaOapg.properties.enable_mixed_chunk, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_dp_attention"]) -> typing.Union[MetaOapg.properties.enable_dp_attention, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_ep_moe"]) -> typing.Union[MetaOapg.properties.enable_ep_moe, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_torch_compile"]) -> typing.Union[MetaOapg.properties.enable_torch_compile, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["torch_compile_max_bs"]) -> typing.Union[MetaOapg.properties.torch_compile_max_bs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cuda_graph_max_bs"]) -> typing.Union[MetaOapg.properties.cuda_graph_max_bs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cuda_graph_bs"]) -> typing.Union[MetaOapg.properties.cuda_graph_bs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["torchao_config"]) -> typing.Union[MetaOapg.properties.torchao_config, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_nan_detection"]) -> typing.Union[MetaOapg.properties.enable_nan_detection, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_p2p_check"]) -> typing.Union[MetaOapg.properties.enable_p2p_check, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_attention_reduce_in_fp32"]) -> typing.Union[MetaOapg.properties.triton_attention_reduce_in_fp32, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_attention_num_kv_splits"]) -> typing.Union[MetaOapg.properties.triton_attention_num_kv_splits, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_continuous_decode_steps"]) -> typing.Union[MetaOapg.properties.num_continuous_decode_steps, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["delete_ckpt_after_loading"]) -> typing.Union[MetaOapg.properties.delete_ckpt_after_loading, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_memory_saver"]) -> typing.Union[MetaOapg.properties.enable_memory_saver, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["allow_auto_truncate"]) -> typing.Union[MetaOapg.properties.allow_auto_truncate, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_custom_logit_processor"]) -> typing.Union[MetaOapg.properties.enable_custom_logit_processor, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["huggingface_repo"]) -> typing.Union[MetaOapg.properties.huggingface_repo, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", "trust_remote_code", "tp_size", "skip_tokenizer_init", "load_format", "dtype", "kv_cache_dtype", "quantization_param_path", "quantization", "context_length", "device", "served_model_name", "chat_template", "is_embedding", "revision", "mem_fraction_static", "max_running_requests", "max_total_tokens", "chunked_prefill_size", "max_prefill_tokens", "schedule_policy", "schedule_conservativeness", "cpu_offload_gb", "prefill_only_one_req", "stream_interval", "random_seed", "constrained_json_whitespace_pattern", "watchdog_timeout", "download_dir", "base_gpu_id", "log_level", "log_level_http", "log_requests", "show_time_cost", "enable_metrics", "decode_log_interval", "api_key", "file_storage_pth", "enable_cache_report", "data_parallel_size", "load_balance_method", "expert_parallel_size", "dist_init_addr", "nnodes", "node_rank", "json_model_override_args", "lora_paths", "max_loras_per_batch", "attention_backend", "sampling_backend", "grammar_backend", "speculative_algorithm", "speculative_draft_model_path", "speculative_num_steps", "speculative_num_draft_tokens", "speculative_eagle_topk", "enable_double_sparsity", 
"ds_channel_config_path", "ds_heavy_channel_num", "ds_heavy_token_num", "ds_heavy_channel_type", "ds_sparse_decode_threshold", "disable_radix_cache", "disable_jump_forward", "disable_cuda_graph", "disable_cuda_graph_padding", "disable_outlines_disk_cache", "disable_custom_all_reduce", "disable_mla", "disable_overlap_schedule", "enable_mixed_chunk", "enable_dp_attention", "enable_ep_moe", "enable_torch_compile", "torch_compile_max_bs", "cuda_graph_max_bs", "cuda_graph_bs", "torchao_config", "enable_nan_detection", "enable_p2p_check", "triton_attention_reduce_in_fp32", "triton_attention_num_kv_splits", "num_continuous_decode_steps", "delete_ckpt_after_loading", "enable_memory_saver", "allow_auto_truncate", "enable_custom_logit_processor", "tool_call_parser", "huggingface_repo", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - model_name: typing.Union[MetaOapg.properties.model_name, None, str, schemas.Unset] = schemas.unset, - source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, - inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - force_bundle_recreation: 
typing.Union[MetaOapg.properties.force_bundle_recreation, None, bool, schemas.Unset] = schemas.unset, - min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, - tp_size: typing.Union[MetaOapg.properties.tp_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, - load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, - dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, - kv_cache_dtype: typing.Union[MetaOapg.properties.kv_cache_dtype, None, str, schemas.Unset] = schemas.unset, - quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, - quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, - context_length: typing.Union[MetaOapg.properties.context_length, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - device: typing.Union[MetaOapg.properties.device, None, str, schemas.Unset] = schemas.unset, - served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, - chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, - is_embedding: typing.Union[MetaOapg.properties.is_embedding, None, bool, 
schemas.Unset] = schemas.unset, - revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = schemas.unset, - mem_fraction_static: typing.Union[MetaOapg.properties.mem_fraction_static, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - max_running_requests: typing.Union[MetaOapg.properties.max_running_requests, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_total_tokens: typing.Union[MetaOapg.properties.max_total_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - chunked_prefill_size: typing.Union[MetaOapg.properties.chunked_prefill_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_prefill_tokens: typing.Union[MetaOapg.properties.max_prefill_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - schedule_policy: typing.Union[MetaOapg.properties.schedule_policy, None, str, schemas.Unset] = schemas.unset, - schedule_conservativeness: typing.Union[MetaOapg.properties.schedule_conservativeness, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - cpu_offload_gb: typing.Union[MetaOapg.properties.cpu_offload_gb, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - prefill_only_one_req: typing.Union[MetaOapg.properties.prefill_only_one_req, None, bool, schemas.Unset] = schemas.unset, - stream_interval: typing.Union[MetaOapg.properties.stream_interval, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - random_seed: typing.Union[MetaOapg.properties.random_seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - constrained_json_whitespace_pattern: typing.Union[MetaOapg.properties.constrained_json_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, - watchdog_timeout: typing.Union[MetaOapg.properties.watchdog_timeout, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - download_dir: typing.Union[MetaOapg.properties.download_dir, None, str, schemas.Unset] = schemas.unset, - base_gpu_id: 
typing.Union[MetaOapg.properties.base_gpu_id, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - log_level: typing.Union[MetaOapg.properties.log_level, None, str, schemas.Unset] = schemas.unset, - log_level_http: typing.Union[MetaOapg.properties.log_level_http, None, str, schemas.Unset] = schemas.unset, - log_requests: typing.Union[MetaOapg.properties.log_requests, None, bool, schemas.Unset] = schemas.unset, - show_time_cost: typing.Union[MetaOapg.properties.show_time_cost, None, bool, schemas.Unset] = schemas.unset, - enable_metrics: typing.Union[MetaOapg.properties.enable_metrics, None, bool, schemas.Unset] = schemas.unset, - decode_log_interval: typing.Union[MetaOapg.properties.decode_log_interval, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - api_key: typing.Union[MetaOapg.properties.api_key, None, str, schemas.Unset] = schemas.unset, - file_storage_pth: typing.Union[MetaOapg.properties.file_storage_pth, None, str, schemas.Unset] = schemas.unset, - enable_cache_report: typing.Union[MetaOapg.properties.enable_cache_report, None, bool, schemas.Unset] = schemas.unset, - data_parallel_size: typing.Union[MetaOapg.properties.data_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - load_balance_method: typing.Union[MetaOapg.properties.load_balance_method, None, str, schemas.Unset] = schemas.unset, - expert_parallel_size: typing.Union[MetaOapg.properties.expert_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - dist_init_addr: typing.Union[MetaOapg.properties.dist_init_addr, None, str, schemas.Unset] = schemas.unset, - nnodes: typing.Union[MetaOapg.properties.nnodes, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - node_rank: typing.Union[MetaOapg.properties.node_rank, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - json_model_override_args: typing.Union[MetaOapg.properties.json_model_override_args, None, str, schemas.Unset] = schemas.unset, - lora_paths: 
typing.Union[MetaOapg.properties.lora_paths, list, tuple, None, schemas.Unset] = schemas.unset, - max_loras_per_batch: typing.Union[MetaOapg.properties.max_loras_per_batch, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - attention_backend: typing.Union[MetaOapg.properties.attention_backend, None, str, schemas.Unset] = schemas.unset, - sampling_backend: typing.Union[MetaOapg.properties.sampling_backend, None, str, schemas.Unset] = schemas.unset, - grammar_backend: typing.Union[MetaOapg.properties.grammar_backend, None, str, schemas.Unset] = schemas.unset, - speculative_algorithm: typing.Union[MetaOapg.properties.speculative_algorithm, None, str, schemas.Unset] = schemas.unset, - speculative_draft_model_path: typing.Union[MetaOapg.properties.speculative_draft_model_path, None, str, schemas.Unset] = schemas.unset, - speculative_num_steps: typing.Union[MetaOapg.properties.speculative_num_steps, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - speculative_num_draft_tokens: typing.Union[MetaOapg.properties.speculative_num_draft_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - speculative_eagle_topk: typing.Union[MetaOapg.properties.speculative_eagle_topk, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enable_double_sparsity: typing.Union[MetaOapg.properties.enable_double_sparsity, None, bool, schemas.Unset] = schemas.unset, - ds_channel_config_path: typing.Union[MetaOapg.properties.ds_channel_config_path, None, str, schemas.Unset] = schemas.unset, - ds_heavy_channel_num: typing.Union[MetaOapg.properties.ds_heavy_channel_num, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - ds_heavy_token_num: typing.Union[MetaOapg.properties.ds_heavy_token_num, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - ds_heavy_channel_type: typing.Union[MetaOapg.properties.ds_heavy_channel_type, None, str, schemas.Unset] = schemas.unset, - ds_sparse_decode_threshold: 
typing.Union[MetaOapg.properties.ds_sparse_decode_threshold, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - disable_radix_cache: typing.Union[MetaOapg.properties.disable_radix_cache, None, bool, schemas.Unset] = schemas.unset, - disable_jump_forward: typing.Union[MetaOapg.properties.disable_jump_forward, None, bool, schemas.Unset] = schemas.unset, - disable_cuda_graph: typing.Union[MetaOapg.properties.disable_cuda_graph, None, bool, schemas.Unset] = schemas.unset, - disable_cuda_graph_padding: typing.Union[MetaOapg.properties.disable_cuda_graph_padding, None, bool, schemas.Unset] = schemas.unset, - disable_outlines_disk_cache: typing.Union[MetaOapg.properties.disable_outlines_disk_cache, None, bool, schemas.Unset] = schemas.unset, - disable_custom_all_reduce: typing.Union[MetaOapg.properties.disable_custom_all_reduce, None, bool, schemas.Unset] = schemas.unset, - disable_mla: typing.Union[MetaOapg.properties.disable_mla, None, bool, schemas.Unset] = schemas.unset, - disable_overlap_schedule: typing.Union[MetaOapg.properties.disable_overlap_schedule, None, bool, schemas.Unset] = schemas.unset, - enable_mixed_chunk: typing.Union[MetaOapg.properties.enable_mixed_chunk, None, bool, schemas.Unset] = schemas.unset, - enable_dp_attention: typing.Union[MetaOapg.properties.enable_dp_attention, None, bool, schemas.Unset] = schemas.unset, - enable_ep_moe: typing.Union[MetaOapg.properties.enable_ep_moe, None, bool, schemas.Unset] = schemas.unset, - enable_torch_compile: typing.Union[MetaOapg.properties.enable_torch_compile, None, bool, schemas.Unset] = schemas.unset, - torch_compile_max_bs: typing.Union[MetaOapg.properties.torch_compile_max_bs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - cuda_graph_max_bs: typing.Union[MetaOapg.properties.cuda_graph_max_bs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - cuda_graph_bs: typing.Union[MetaOapg.properties.cuda_graph_bs, list, tuple, None, schemas.Unset] = schemas.unset, - 
torchao_config: typing.Union[MetaOapg.properties.torchao_config, None, str, schemas.Unset] = schemas.unset, - enable_nan_detection: typing.Union[MetaOapg.properties.enable_nan_detection, None, bool, schemas.Unset] = schemas.unset, - enable_p2p_check: typing.Union[MetaOapg.properties.enable_p2p_check, None, bool, schemas.Unset] = schemas.unset, - triton_attention_reduce_in_fp32: typing.Union[MetaOapg.properties.triton_attention_reduce_in_fp32, None, bool, schemas.Unset] = schemas.unset, - triton_attention_num_kv_splits: typing.Union[MetaOapg.properties.triton_attention_num_kv_splits, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - num_continuous_decode_steps: typing.Union[MetaOapg.properties.num_continuous_decode_steps, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - delete_ckpt_after_loading: typing.Union[MetaOapg.properties.delete_ckpt_after_loading, None, bool, schemas.Unset] = schemas.unset, - enable_memory_saver: typing.Union[MetaOapg.properties.enable_memory_saver, None, bool, schemas.Unset] = schemas.unset, - allow_auto_truncate: typing.Union[MetaOapg.properties.allow_auto_truncate, None, bool, schemas.Unset] = schemas.unset, - enable_custom_logit_processor: typing.Union[MetaOapg.properties.enable_custom_logit_processor, None, bool, schemas.Unset] = schemas.unset, - tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, - huggingface_repo: typing.Union[MetaOapg.properties.huggingface_repo, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateSGLangModelEndpointRequest': - return super().__new__( - cls, - *_args, - quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - 
memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - model_name=model_name, - source=source, - inference_framework=inference_framework, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - metadata=metadata, - force_bundle_recreation=force_bundle_recreation, - min_workers=min_workers, - max_workers=max_workers, - per_worker=per_worker, - labels=labels, - trust_remote_code=trust_remote_code, - tp_size=tp_size, - skip_tokenizer_init=skip_tokenizer_init, - load_format=load_format, - dtype=dtype, - kv_cache_dtype=kv_cache_dtype, - quantization_param_path=quantization_param_path, - quantization=quantization, - context_length=context_length, - device=device, - served_model_name=served_model_name, - chat_template=chat_template, - is_embedding=is_embedding, - revision=revision, - mem_fraction_static=mem_fraction_static, - max_running_requests=max_running_requests, - max_total_tokens=max_total_tokens, - chunked_prefill_size=chunked_prefill_size, - max_prefill_tokens=max_prefill_tokens, - schedule_policy=schedule_policy, - schedule_conservativeness=schedule_conservativeness, - cpu_offload_gb=cpu_offload_gb, - prefill_only_one_req=prefill_only_one_req, - stream_interval=stream_interval, - random_seed=random_seed, - constrained_json_whitespace_pattern=constrained_json_whitespace_pattern, - watchdog_timeout=watchdog_timeout, - download_dir=download_dir, - base_gpu_id=base_gpu_id, - log_level=log_level, - log_level_http=log_level_http, - log_requests=log_requests, - show_time_cost=show_time_cost, - enable_metrics=enable_metrics, - 
decode_log_interval=decode_log_interval, - api_key=api_key, - file_storage_pth=file_storage_pth, - enable_cache_report=enable_cache_report, - data_parallel_size=data_parallel_size, - load_balance_method=load_balance_method, - expert_parallel_size=expert_parallel_size, - dist_init_addr=dist_init_addr, - nnodes=nnodes, - node_rank=node_rank, - json_model_override_args=json_model_override_args, - lora_paths=lora_paths, - max_loras_per_batch=max_loras_per_batch, - attention_backend=attention_backend, - sampling_backend=sampling_backend, - grammar_backend=grammar_backend, - speculative_algorithm=speculative_algorithm, - speculative_draft_model_path=speculative_draft_model_path, - speculative_num_steps=speculative_num_steps, - speculative_num_draft_tokens=speculative_num_draft_tokens, - speculative_eagle_topk=speculative_eagle_topk, - enable_double_sparsity=enable_double_sparsity, - ds_channel_config_path=ds_channel_config_path, - ds_heavy_channel_num=ds_heavy_channel_num, - ds_heavy_token_num=ds_heavy_token_num, - ds_heavy_channel_type=ds_heavy_channel_type, - ds_sparse_decode_threshold=ds_sparse_decode_threshold, - disable_radix_cache=disable_radix_cache, - disable_jump_forward=disable_jump_forward, - disable_cuda_graph=disable_cuda_graph, - disable_cuda_graph_padding=disable_cuda_graph_padding, - disable_outlines_disk_cache=disable_outlines_disk_cache, - disable_custom_all_reduce=disable_custom_all_reduce, - disable_mla=disable_mla, - disable_overlap_schedule=disable_overlap_schedule, - enable_mixed_chunk=enable_mixed_chunk, - enable_dp_attention=enable_dp_attention, - enable_ep_moe=enable_ep_moe, - enable_torch_compile=enable_torch_compile, - torch_compile_max_bs=torch_compile_max_bs, - cuda_graph_max_bs=cuda_graph_max_bs, - cuda_graph_bs=cuda_graph_bs, - torchao_config=torchao_config, - enable_nan_detection=enable_nan_detection, - enable_p2p_check=enable_p2p_check, - triton_attention_reduce_in_fp32=triton_attention_reduce_in_fp32, - 
triton_attention_num_kv_splits=triton_attention_num_kv_splits, - num_continuous_decode_steps=num_continuous_decode_steps, - delete_ckpt_after_loading=delete_ckpt_after_loading, - enable_memory_saver=enable_memory_saver, - allow_auto_truncate=allow_auto_truncate, - enable_custom_logit_processor=enable_custom_logit_processor, - tool_call_parser=tool_call_parser, - huggingface_repo=huggingface_repo, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/update_text_generation_inference_model_endpoint_request.py b/launch/api_client/model/update_text_generation_inference_model_endpoint_request.py deleted file mode 100644 index fb601999..00000000 --- a/launch/api_client/model/update_text_generation_inference_model_endpoint_request.py +++ /dev/null @@ -1,952 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateTextGenerationInferenceModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "text_generation_inference": "TEXT_GENERATION_INFERENCE", - } - - @schemas.classproperty - def TEXT_GENERATION_INFERENCE(cls): - return cls("text_generation_inference") - - - class inference_framework_image_tag( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'inference_framework_image_tag': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_shards( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - 
*_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_shards': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class force_bundle_recreation( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'force_bundle_recreation': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - 
): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class labels( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - "chat_template_override": chat_template_override, - 
"enable_startup_metrics": enable_startup_metrics, - "model_name": model_name, - "source": source, - "inference_framework": inference_framework, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "metadata": metadata, - "force_bundle_recreation": force_bundle_recreation, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> MetaOapg.properties.force_bundle_recreation: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> typing.Union[MetaOapg.properties.force_bundle_recreation, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - model_name: typing.Union[MetaOapg.properties.model_name, None, str, schemas.Unset] = schemas.unset, - source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, - inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - 
force_bundle_recreation: typing.Union[MetaOapg.properties.force_bundle_recreation, None, bool, schemas.Unset] = schemas.unset, - min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateTextGenerationInferenceModelEndpointRequest': - return super().__new__( - cls, - *_args, - quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - model_name=model_name, - source=source, - inference_framework=inference_framework, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - metadata=metadata, - force_bundle_recreation=force_bundle_recreation, - min_workers=min_workers, - max_workers=max_workers, - per_worker=per_worker, - labels=labels, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type 
import GpuType -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/update_trigger_v1_request.py b/launch/api_client/model/update_trigger_v1_request.py deleted file mode 100644 index d36707a9..00000000 --- a/launch/api_client/model/update_trigger_v1_request.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateTriggerV1Request( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class cron_schedule( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'cron_schedule': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class suspend( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'suspend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "cron_schedule": cron_schedule, - "suspend": suspend, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suspend"]) -> MetaOapg.properties.suspend: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["cron_schedule", "suspend", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> typing.Union[MetaOapg.properties.cron_schedule, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["suspend"]) -> typing.Union[MetaOapg.properties.suspend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cron_schedule", "suspend", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - cron_schedule: typing.Union[MetaOapg.properties.cron_schedule, None, str, schemas.Unset] = schemas.unset, - suspend: typing.Union[MetaOapg.properties.suspend, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateTriggerV1Request': - return super().__new__( - cls, - *_args, - cron_schedule=cron_schedule, - suspend=suspend, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_trigger_v1_response.py b/launch/api_client/model/update_trigger_v1_response.py deleted file mode 100644 index 130d49d8..00000000 --- a/launch/api_client/model/update_trigger_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateTriggerV1Response( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - success: typing.Union[MetaOapg.properties.success, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UpdateTriggerV1Response': - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_vllm_model_endpoint_request.py b/launch/api_client/model/update_vllm_model_endpoint_request.py deleted file mode 100644 index c7c113f3..00000000 --- a/launch/api_client/model/update_vllm_model_endpoint_request.py +++ /dev/null @@ -1,2093 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: 
https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UpdateVLLMModelEndpointRequest( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - class properties: - - @staticmethod - def quantize() -> typing.Type['Quantization']: - return Quantization - - - class checkpoint_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'checkpoint_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class post_inference_hooks( - schemas.ListBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneTupleMixin - ): - - - class MetaOapg: - items = schemas.StrSchema - - - def __new__( - cls, - *_args: typing.Union[list, tuple, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'post_inference_hooks': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cpus( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'cpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class gpus( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpus': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class memory( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'memory': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type['GpuType']: - return GpuType - - - class storage( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'storage': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class nodes_per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'nodes_per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class optimize_costs( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'optimize_costs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class prewarm( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'prewarm': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class high_priority( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - 
schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'high_priority': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class billing_tags( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'billing_tags': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class default_callback_url( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'default_callback_url': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def default_callback_auth() -> typing.Type['CallbackAuth']: - return CallbackAuth - - - class public_inference( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'public_inference': - return super().__new__( - cls, - *_args, - 
_configuration=_configuration, - ) - - - class chat_template_override( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_startup_metrics( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_startup_metrics': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - @staticmethod - def source() -> typing.Type['LLMSource']: - return LLMSource - - - class inference_framework( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "vllm": "VLLM", - } - - @schemas.classproperty - def VLLM(cls): - return cls("vllm") - - - class inference_framework_image_tag( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'inference_framework_image_tag': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_shards( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: 
typing.Optional[schemas.Configuration] = None, - ) -> 'num_shards': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class metadata( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'metadata': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class force_bundle_recreation( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'force_bundle_recreation': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class min_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'min_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_workers( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, 
decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_workers': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class per_worker( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'per_worker': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class labels( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.StrSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, str, ], - ) -> 'labels': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class max_gpu_memory_utilization( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_gpu_memory_utilization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class attention_backend( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 
'attention_backend': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_model_len( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_model_len': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_num_seqs( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_num_seqs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enforce_eager( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enforce_eager': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class trust_remote_code( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'trust_remote_code': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class pipeline_parallel_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'pipeline_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tensor_parallel_size( - schemas.IntBase, - schemas.NoneBase, - 
schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tensor_parallel_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_log_requests( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_log_requests': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class chat_template( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'chat_template': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tool_call_parser( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tool_call_parser': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_auto_tool_choice( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_auto_tool_choice': - return 
super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class load_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'load_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class config_format( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'config_format': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokenizer_mode( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer_mode': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class limit_mm_per_prompt( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'limit_mm_per_prompt': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_num_batched_tokens( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_num_batched_tokens': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class tokenizer( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: 
typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class seed( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'seed': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class code_revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'code_revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class rope_scaling( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return 
super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'rope_scaling': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class tokenizer_revision( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'tokenizer_revision': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class quantization_param_path( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'quantization_param_path': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class max_seq_len_to_capture( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'max_seq_len_to_capture': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class disable_sliding_window( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'disable_sliding_window': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class 
skip_tokenizer_init( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'skip_tokenizer_init': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class served_model_name( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'served_model_name': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class override_neuron_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'override_neuron_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class mm_processor_kwargs( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return 
super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'mm_processor_kwargs': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - - class block_size( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'block_size': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class gpu_memory_utilization( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'gpu_memory_utilization': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class swap_space( - schemas.NumberBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, float, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'swap_space': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class cache_dtype( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) 
-> 'cache_dtype': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class num_gpu_blocks_override( - schemas.IntBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneDecimalMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, decimal.Decimal, int, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'num_gpu_blocks_override': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class enable_prefix_caching( - schemas.BoolBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneBoolMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, bool, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'enable_prefix_caching': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "quantize": quantize, - "checkpoint_path": checkpoint_path, - "post_inference_hooks": post_inference_hooks, - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "storage": storage, - "nodes_per_worker": nodes_per_worker, - "optimize_costs": optimize_costs, - "prewarm": prewarm, - "high_priority": high_priority, - "billing_tags": billing_tags, - "default_callback_url": default_callback_url, - "default_callback_auth": default_callback_auth, - "public_inference": public_inference, - "chat_template_override": chat_template_override, - "enable_startup_metrics": enable_startup_metrics, - "model_name": model_name, - "source": source, - "inference_framework": inference_framework, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "metadata": metadata, - "force_bundle_recreation": force_bundle_recreation, - "min_workers": min_workers, - "max_workers": max_workers, - "per_worker": per_worker, - "labels": labels, - "max_gpu_memory_utilization": max_gpu_memory_utilization, - "attention_backend": attention_backend, - "max_model_len": max_model_len, - 
"max_num_seqs": max_num_seqs, - "enforce_eager": enforce_eager, - "trust_remote_code": trust_remote_code, - "pipeline_parallel_size": pipeline_parallel_size, - "tensor_parallel_size": tensor_parallel_size, - "quantization": quantization, - "disable_log_requests": disable_log_requests, - "chat_template": chat_template, - "tool_call_parser": tool_call_parser, - "enable_auto_tool_choice": enable_auto_tool_choice, - "load_format": load_format, - "config_format": config_format, - "tokenizer_mode": tokenizer_mode, - "limit_mm_per_prompt": limit_mm_per_prompt, - "max_num_batched_tokens": max_num_batched_tokens, - "tokenizer": tokenizer, - "dtype": dtype, - "seed": seed, - "revision": revision, - "code_revision": code_revision, - "rope_scaling": rope_scaling, - "tokenizer_revision": tokenizer_revision, - "quantization_param_path": quantization_param_path, - "max_seq_len_to_capture": max_seq_len_to_capture, - "disable_sliding_window": disable_sliding_window, - "skip_tokenizer_init": skip_tokenizer_init, - "served_model_name": served_model_name, - "override_neuron_config": override_neuron_config, - "mm_processor_kwargs": mm_processor_kwargs, - "block_size": block_size, - "gpu_memory_utilization": gpu_memory_utilization, - "swap_space": swap_space, - "cache_dtype": cache_dtype, - "num_gpu_blocks_override": num_gpu_blocks_override, - "enable_prefix_caching": enable_prefix_caching, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> MetaOapg.properties.force_bundle_recreation: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_gpu_memory_utilization"]) -> MetaOapg.properties.max_gpu_memory_utilization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["attention_backend"]) -> MetaOapg.properties.attention_backend: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_model_len"]) -> MetaOapg.properties.max_model_len: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_num_seqs"]) -> MetaOapg.properties.max_num_seqs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enforce_eager"]) -> MetaOapg.properties.enforce_eager: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> MetaOapg.properties.pipeline_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> MetaOapg.properties.tensor_parallel_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_log_requests"]) -> MetaOapg.properties.disable_log_requests: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> MetaOapg.properties.enable_auto_tool_choice: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["config_format"]) -> MetaOapg.properties.config_format: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer_mode"]) -> MetaOapg.properties.tokenizer_mode: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> MetaOapg.properties.limit_mm_per_prompt: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> MetaOapg.properties.max_num_batched_tokens: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer"]) -> MetaOapg.properties.tokenizer: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["code_revision"]) -> MetaOapg.properties.code_revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["rope_scaling"]) -> MetaOapg.properties.rope_scaling: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokenizer_revision"]) -> MetaOapg.properties.tokenizer_revision: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> MetaOapg.properties.max_seq_len_to_capture: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["disable_sliding_window"]) -> MetaOapg.properties.disable_sliding_window: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["override_neuron_config"]) -> MetaOapg.properties.override_neuron_config: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> MetaOapg.properties.mm_processor_kwargs: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["block_size"]) -> MetaOapg.properties.block_size: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> MetaOapg.properties.gpu_memory_utilization: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["swap_space"]) -> MetaOapg.properties.swap_space: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cache_dtype"]) -> MetaOapg.properties.cache_dtype: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> MetaOapg.properties.num_gpu_blocks_override: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> MetaOapg.properties.enable_prefix_caching: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", "max_gpu_memory_utilization", "attention_backend", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> typing.Union[MetaOapg.properties.force_bundle_recreation, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.max_gpu_memory_utilization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["attention_backend"]) -> typing.Union[MetaOapg.properties.attention_backend, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_model_len"]) -> typing.Union[MetaOapg.properties.max_model_len, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_num_seqs"]) -> typing.Union[MetaOapg.properties.max_num_seqs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enforce_eager"]) -> typing.Union[MetaOapg.properties.enforce_eager, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> typing.Union[MetaOapg.properties.pipeline_parallel_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> typing.Union[MetaOapg.properties.tensor_parallel_size, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_log_requests"]) -> typing.Union[MetaOapg.properties.disable_log_requests, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> typing.Union[MetaOapg.properties.enable_auto_tool_choice, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["config_format"]) -> typing.Union[MetaOapg.properties.config_format, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_mode"]) -> typing.Union[MetaOapg.properties.tokenizer_mode, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> typing.Union[MetaOapg.properties.limit_mm_per_prompt, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> typing.Union[MetaOapg.properties.max_num_batched_tokens, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer"]) -> typing.Union[MetaOapg.properties.tokenizer, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["code_revision"]) -> typing.Union[MetaOapg.properties.code_revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["rope_scaling"]) -> typing.Union[MetaOapg.properties.rope_scaling, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_revision"]) -> typing.Union[MetaOapg.properties.tokenizer_revision, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> typing.Union[MetaOapg.properties.max_seq_len_to_capture, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["disable_sliding_window"]) -> typing.Union[MetaOapg.properties.disable_sliding_window, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["override_neuron_config"]) -> typing.Union[MetaOapg.properties.override_neuron_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> typing.Union[MetaOapg.properties.mm_processor_kwargs, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["block_size"]) -> typing.Union[MetaOapg.properties.block_size, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.gpu_memory_utilization, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["swap_space"]) -> typing.Union[MetaOapg.properties.swap_space, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cache_dtype"]) -> typing.Union[MetaOapg.properties.cache_dtype, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> typing.Union[MetaOapg.properties.num_gpu_blocks_override, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> typing.Union[MetaOapg.properties.enable_prefix_caching, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", "max_gpu_memory_utilization", "attention_backend", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, - cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = 
schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, - nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, - billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, - chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, - enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, - model_name: typing.Union[MetaOapg.properties.model_name, None, str, schemas.Unset] = schemas.unset, - source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, - 
inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - force_bundle_recreation: typing.Union[MetaOapg.properties.force_bundle_recreation, None, bool, schemas.Unset] = schemas.unset, - min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - max_gpu_memory_utilization: typing.Union[MetaOapg.properties.max_gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - attention_backend: typing.Union[MetaOapg.properties.attention_backend, None, str, schemas.Unset] = schemas.unset, - max_model_len: typing.Union[MetaOapg.properties.max_model_len, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_num_seqs: typing.Union[MetaOapg.properties.max_num_seqs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enforce_eager: typing.Union[MetaOapg.properties.enforce_eager, None, bool, schemas.Unset] = schemas.unset, - trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, - pipeline_parallel_size: typing.Union[MetaOapg.properties.pipeline_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - 
tensor_parallel_size: typing.Union[MetaOapg.properties.tensor_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, - disable_log_requests: typing.Union[MetaOapg.properties.disable_log_requests, None, bool, schemas.Unset] = schemas.unset, - chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, - tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, - enable_auto_tool_choice: typing.Union[MetaOapg.properties.enable_auto_tool_choice, None, bool, schemas.Unset] = schemas.unset, - load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, - config_format: typing.Union[MetaOapg.properties.config_format, None, str, schemas.Unset] = schemas.unset, - tokenizer_mode: typing.Union[MetaOapg.properties.tokenizer_mode, None, str, schemas.Unset] = schemas.unset, - limit_mm_per_prompt: typing.Union[MetaOapg.properties.limit_mm_per_prompt, None, str, schemas.Unset] = schemas.unset, - max_num_batched_tokens: typing.Union[MetaOapg.properties.max_num_batched_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - tokenizer: typing.Union[MetaOapg.properties.tokenizer, None, str, schemas.Unset] = schemas.unset, - dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = schemas.unset, - code_revision: typing.Union[MetaOapg.properties.code_revision, None, str, schemas.Unset] = schemas.unset, - rope_scaling: typing.Union[MetaOapg.properties.rope_scaling, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - tokenizer_revision: 
typing.Union[MetaOapg.properties.tokenizer_revision, None, str, schemas.Unset] = schemas.unset, - quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, - max_seq_len_to_capture: typing.Union[MetaOapg.properties.max_seq_len_to_capture, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - disable_sliding_window: typing.Union[MetaOapg.properties.disable_sliding_window, None, bool, schemas.Unset] = schemas.unset, - skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, - served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, - override_neuron_config: typing.Union[MetaOapg.properties.override_neuron_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - mm_processor_kwargs: typing.Union[MetaOapg.properties.mm_processor_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - block_size: typing.Union[MetaOapg.properties.block_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - gpu_memory_utilization: typing.Union[MetaOapg.properties.gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - swap_space: typing.Union[MetaOapg.properties.swap_space, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - cache_dtype: typing.Union[MetaOapg.properties.cache_dtype, None, str, schemas.Unset] = schemas.unset, - num_gpu_blocks_override: typing.Union[MetaOapg.properties.num_gpu_blocks_override, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, - enable_prefix_caching: typing.Union[MetaOapg.properties.enable_prefix_caching, None, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, 
None, list, tuple, bytes], - ) -> 'UpdateVLLMModelEndpointRequest': - return super().__new__( - cls, - *_args, - quantize=quantize, - checkpoint_path=checkpoint_path, - post_inference_hooks=post_inference_hooks, - cpus=cpus, - gpus=gpus, - memory=memory, - gpu_type=gpu_type, - storage=storage, - nodes_per_worker=nodes_per_worker, - optimize_costs=optimize_costs, - prewarm=prewarm, - high_priority=high_priority, - billing_tags=billing_tags, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - public_inference=public_inference, - chat_template_override=chat_template_override, - enable_startup_metrics=enable_startup_metrics, - model_name=model_name, - source=source, - inference_framework=inference_framework, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - metadata=metadata, - force_bundle_recreation=force_bundle_recreation, - min_workers=min_workers, - max_workers=max_workers, - per_worker=per_worker, - labels=labels, - max_gpu_memory_utilization=max_gpu_memory_utilization, - attention_backend=attention_backend, - max_model_len=max_model_len, - max_num_seqs=max_num_seqs, - enforce_eager=enforce_eager, - trust_remote_code=trust_remote_code, - pipeline_parallel_size=pipeline_parallel_size, - tensor_parallel_size=tensor_parallel_size, - quantization=quantization, - disable_log_requests=disable_log_requests, - chat_template=chat_template, - tool_call_parser=tool_call_parser, - enable_auto_tool_choice=enable_auto_tool_choice, - load_format=load_format, - config_format=config_format, - tokenizer_mode=tokenizer_mode, - limit_mm_per_prompt=limit_mm_per_prompt, - max_num_batched_tokens=max_num_batched_tokens, - tokenizer=tokenizer, - dtype=dtype, - seed=seed, - revision=revision, - code_revision=code_revision, - rope_scaling=rope_scaling, - tokenizer_revision=tokenizer_revision, - quantization_param_path=quantization_param_path, - max_seq_len_to_capture=max_seq_len_to_capture, - 
disable_sliding_window=disable_sliding_window, - skip_tokenizer_init=skip_tokenizer_init, - served_model_name=served_model_name, - override_neuron_config=override_neuron_config, - mm_processor_kwargs=mm_processor_kwargs, - block_size=block_size, - gpu_memory_utilization=gpu_memory_utilization, - swap_space=swap_space, - cache_dtype=cache_dtype, - num_gpu_blocks_override=num_gpu_blocks_override, - enable_prefix_caching=enable_prefix_caching, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/upload_file_response.py b/launch/api_client/model/upload_file_response.py deleted file mode 100644 index 15cfb00e..00000000 --- a/launch/api_client/model/upload_file_response.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UploadFileResponse( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for uploading a file. 
- """ - - - class MetaOapg: - required = { - "id", - } - - class properties: - id = schemas.StrSchema - __annotations__ = { - "id": id, - } - - id: MetaOapg.properties.id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - id: typing.Union[MetaOapg.properties.id, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UploadFileResponse': - return super().__new__( - cls, - *_args, - id=id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/url_citation.py b/launch/api_client/model/url_citation.py deleted file mode 100644 index 47ddef88..00000000 --- a/launch/api_client/model/url_citation.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # 
noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UrlCitation( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "start_index", - "end_index", - "title", - "url", - } - - class properties: - end_index = schemas.IntSchema - start_index = schemas.IntSchema - url = schemas.StrSchema - title = schemas.StrSchema - __annotations__ = { - "end_index": end_index, - "start_index": start_index, - "url": url, - "title": title, - } - - start_index: MetaOapg.properties.start_index - end_index: MetaOapg.properties.end_index - title: MetaOapg.properties.title - url: MetaOapg.properties.url - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["end_index"]) -> MetaOapg.properties.end_index: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["start_index"]) -> MetaOapg.properties.start_index: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["title"]) -> MetaOapg.properties.title: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["end_index", "start_index", "url", "title", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["end_index"]) -> MetaOapg.properties.end_index: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["start_index"]) -> MetaOapg.properties.start_index: ... 
- - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["title"]) -> MetaOapg.properties.title: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["end_index", "start_index", "url", "title", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - start_index: typing.Union[MetaOapg.properties.start_index, decimal.Decimal, int, ], - end_index: typing.Union[MetaOapg.properties.end_index, decimal.Decimal, int, ], - title: typing.Union[MetaOapg.properties.title, str, ], - url: typing.Union[MetaOapg.properties.url, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UrlCitation': - return super().__new__( - cls, - *_args, - start_index=start_index, - end_index=end_index, - title=title, - url=url, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/user_location.py b/launch/api_client/model/user_location.py deleted file mode 100644 index 7aa18e60..00000000 --- a/launch/api_client/model/user_location.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: 
F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class UserLocation( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - required = { - "approximate", - "type", - } - - class properties: - - - class type( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "approximate": "APPROXIMATE", - } - - @schemas.classproperty - def APPROXIMATE(cls): - return cls("approximate") - - @staticmethod - def approximate() -> typing.Type['WebSearchLocation']: - return WebSearchLocation - __annotations__ = { - "type": type, - "approximate": approximate, - } - - approximate: 'WebSearchLocation' - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["approximate"]) -> 'WebSearchLocation': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "approximate", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["approximate"]) -> 'WebSearchLocation': ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "approximate", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - approximate: 'WebSearchLocation', - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'UserLocation': - return super().__new__( - cls, - *_args, - approximate=approximate, - type=type, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.web_search_location import WebSearchLocation diff --git a/launch/api_client/model/validation_error.py b/launch/api_client/model/validation_error.py deleted file mode 100644 index 6b4dab58..00000000 --- a/launch/api_client/model/validation_error.py +++ /dev/null @@ -1,167 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ValidationError( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - required = { - "msg", - "loc", - "type", - } - - class properties: - - - class loc( - schemas.ListSchema - ): - - - class MetaOapg: - - - class items( - schemas.ComposedSchema, - ): - - - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'items': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ]], typing.List[typing.Union[MetaOapg.items, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'loc': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return 
super().__getitem__(i) - msg = schemas.StrSchema - type = schemas.StrSchema - __annotations__ = { - "loc": loc, - "msg": msg, - "type": type, - } - - msg: MetaOapg.properties.msg - loc: MetaOapg.properties.loc - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["loc"]) -> MetaOapg.properties.loc: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["msg"]) -> MetaOapg.properties.msg: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["loc", "msg", "type", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["loc"]) -> MetaOapg.properties.loc: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["msg"]) -> MetaOapg.properties.msg: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["loc", "msg", "type", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - msg: typing.Union[MetaOapg.properties.msg, str, ], - loc: typing.Union[MetaOapg.properties.loc, list, tuple, ], - type: typing.Union[MetaOapg.properties.type, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ValidationError': - return super().__new__( - cls, - *_args, - msg=msg, - loc=loc, - type=type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/voice_ids_shared.py b/launch/api_client/model/voice_ids_shared.py deleted file mode 100644 index 2b44a00e..00000000 --- a/launch/api_client/model/voice_ids_shared.py +++ /dev/null @@ -1,132 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class VoiceIdsShared( - schemas.ComposedSchema, -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - any_of_0 = schemas.StrSchema - - - class any_of_1( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "alloy": "ALLOY", - "ash": "ASH", - "ballad": "BALLAD", - "coral": "CORAL", - "echo": "ECHO", - "fable": "FABLE", - "onyx": "ONYX", - "nova": "NOVA", - "sage": "SAGE", - "shimmer": "SHIMMER", - "verse": "VERSE", - } - - @schemas.classproperty - def ALLOY(cls): - return cls("alloy") - - @schemas.classproperty - def ASH(cls): - return cls("ash") - - @schemas.classproperty - def BALLAD(cls): - return cls("ballad") - - @schemas.classproperty - def CORAL(cls): - return cls("coral") - - @schemas.classproperty - def ECHO(cls): - return cls("echo") - - @schemas.classproperty - def FABLE(cls): - return cls("fable") - - @schemas.classproperty - def ONYX(cls): - return cls("onyx") - - @schemas.classproperty - def NOVA(cls): - return cls("nova") - - @schemas.classproperty - def SAGE(cls): - return cls("sage") - - @schemas.classproperty - def SHIMMER(cls): - return cls("shimmer") - - @schemas.classproperty - def VERSE(cls): - return cls("verse") - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'VoiceIdsShared': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/web_search_context_size.py b/launch/api_client/model/web_search_context_size.py deleted file mode 100644 index 31c3e655..00000000 --- a/launch/api_client/model/web_search_context_size.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class WebSearchContextSize( - schemas.EnumBase, - schemas.StrSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - High level guidance for the amount of context window space to use for the -search. 
One of `low`, `medium`, or `high`. `medium` is the default. - - """ - - - class MetaOapg: - enum_value_to_name = { - "low": "LOW", - "medium": "MEDIUM", - "high": "HIGH", - } - - @schemas.classproperty - def LOW(cls): - return cls("low") - - @schemas.classproperty - def MEDIUM(cls): - return cls("medium") - - @schemas.classproperty - def HIGH(cls): - return cls("high") diff --git a/launch/api_client/model/web_search_location.py b/launch/api_client/model/web_search_location.py deleted file mode 100644 index 20fbb38c..00000000 --- a/launch/api_client/model/web_search_location.py +++ /dev/null @@ -1,184 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class WebSearchLocation( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - - class MetaOapg: - - class properties: - - - class country( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'country': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class region( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'region': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class city( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'city': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - - class timezone( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin - ): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'timezone': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - __annotations__ = { - "country": country, - "region": region, - "city": city, - "timezone": timezone, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["country"]) -> MetaOapg.properties.country: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["region"]) -> MetaOapg.properties.region: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["city"]) -> MetaOapg.properties.city: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timezone"]) -> MetaOapg.properties.timezone: ... 
- - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["country", "region", "city", "timezone", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["country"]) -> typing.Union[MetaOapg.properties.country, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["region"]) -> typing.Union[MetaOapg.properties.region, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["city"]) -> typing.Union[MetaOapg.properties.city, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["timezone"]) -> typing.Union[MetaOapg.properties.timezone, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["country", "region", "city", "timezone", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - country: typing.Union[MetaOapg.properties.country, None, str, schemas.Unset] = schemas.unset, - region: typing.Union[MetaOapg.properties.region, None, str, schemas.Unset] = schemas.unset, - city: typing.Union[MetaOapg.properties.city, None, str, schemas.Unset] = schemas.unset, - timezone: typing.Union[MetaOapg.properties.timezone, None, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'WebSearchLocation': - return super().__new__( - cls, - *_args, - country=country, - region=region, - city=city, - timezone=timezone, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/web_search_options.py b/launch/api_client/model/web_search_options.py deleted file mode 100644 index f14e71b3..00000000 --- a/launch/api_client/model/web_search_options.py +++ /dev/null @@ -1,99 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class WebSearchOptions( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. 
- Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - - class MetaOapg: - - class properties: - - @staticmethod - def user_location() -> typing.Type['UserLocation']: - return UserLocation - - @staticmethod - def search_context_size() -> typing.Type['WebSearchContextSize']: - return WebSearchContextSize - __annotations__ = { - "user_location": user_location, - "search_context_size": search_context_size, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["user_location"]) -> 'UserLocation': ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["search_context_size"]) -> 'WebSearchContextSize': ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - - def __getitem__(self, name: typing.Union[typing_extensions.Literal["user_location", "search_context_size", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["user_location"]) -> typing.Union['UserLocation', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["search_context_size"]) -> typing.Union['WebSearchContextSize', schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["user_location", "search_context_size", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - user_location: typing.Union['UserLocation', schemas.Unset] = schemas.unset, - search_context_size: typing.Union['WebSearchContextSize', schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'WebSearchOptions': - return super().__new__( - cls, - *_args, - user_location=user_location, - search_context_size=search_context_size, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.user_location import UserLocation -from launch.api_client.model.web_search_context_size import ( - WebSearchContextSize, -) diff --git a/launch/api_client/model/zip_artifact_flavor.py b/launch/api_client/model/zip_artifact_flavor.py deleted file mode 100644 index f2ade7df..00000000 --- a/launch/api_client/model/zip_artifact_flavor.py +++ /dev/null @@ -1,265 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 - -from launch.api_client import schemas # noqa: F401 - - -class ZipArtifactFlavor( - schemas.DictSchema -): - """NOTE: This class is auto generated by OpenAPI Generator. 
- Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the Model Bundle flavor of a zip artifact. - """ - - - class MetaOapg: - required = { - "flavor", - "requirements", - "framework", - "load_model_fn_module_path", - "location", - "load_predict_fn_module_path", - } - - class properties: - - - class requirements( - schemas.ListSchema - ): - - - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'requirements': - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - - class framework( - schemas.ComposedSchema, - ): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - PytorchFramework, - TensorflowFramework, - CustomFramework, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'framework': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - location = schemas.StrSchema - - - class flavor( - schemas.EnumBase, - schemas.StrSchema - ): - - - class MetaOapg: - enum_value_to_name = { - "zip_artifact": "ZIP_ARTIFACT", - } - - @schemas.classproperty - def ZIP_ARTIFACT(cls): - return cls("zip_artifact") - load_predict_fn_module_path = schemas.StrSchema - load_model_fn_module_path = schemas.StrSchema - - - class app_config( - schemas.DictBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneFrozenDictMixin - ): - - - class MetaOapg: - additional_properties = schemas.AnyTypeSchema - - - def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, None, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, 
list, tuple, bytes, io.FileIO, io.BufferedReader, ], - ) -> 'app_config': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "requirements": requirements, - "framework": framework, - "location": location, - "flavor": flavor, - "load_predict_fn_module_path": load_predict_fn_module_path, - "load_model_fn_module_path": load_model_fn_module_path, - "app_config": app_config, - } - - flavor: MetaOapg.properties.flavor - requirements: MetaOapg.properties.requirements - framework: MetaOapg.properties.framework - load_model_fn_module_path: MetaOapg.properties.load_model_fn_module_path - location: MetaOapg.properties.location - load_predict_fn_module_path: MetaOapg.properties.load_predict_fn_module_path - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_predict_fn_module_path"]) -> MetaOapg.properties.load_predict_fn_module_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_model_fn_module_path"]) -> MetaOapg.properties.load_model_fn_module_path: ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- - def __getitem__(self, name: typing.Union[typing_extensions.Literal["requirements", "framework", "location", "flavor", "load_predict_fn_module_path", "load_model_fn_module_path", "app_config", ], str]): - # dict_instance[name] accessor - return super().__getitem__(name) - - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_predict_fn_module_path"]) -> MetaOapg.properties.load_predict_fn_module_path: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_model_fn_module_path"]) -> MetaOapg.properties.load_model_fn_module_path: ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["app_config"]) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- - def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requirements", "framework", "location", "flavor", "load_predict_fn_module_path", "load_model_fn_module_path", "app_config", ], str]): - return super().get_item_oapg(name) - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, ], - flavor: typing.Union[MetaOapg.properties.flavor, str, ], - requirements: typing.Union[MetaOapg.properties.requirements, list, tuple, ], - framework: typing.Union[MetaOapg.properties.framework, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - load_model_fn_module_path: typing.Union[MetaOapg.properties.load_model_fn_module_path, str, ], - location: typing.Union[MetaOapg.properties.location, str, ], - load_predict_fn_module_path: typing.Union[MetaOapg.properties.load_predict_fn_module_path, str, ], - app_config: typing.Union[MetaOapg.properties.app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'ZipArtifactFlavor': - return super().__new__( - cls, - *_args, - flavor=flavor, - requirements=requirements, - framework=framework, - load_model_fn_module_path=load_model_fn_module_path, - location=location, - load_predict_fn_module_path=load_predict_fn_module_path, - app_config=app_config, - _configuration=_configuration, - **kwargs, - ) - -from launch.api_client.model.custom_framework import CustomFramework -from launch.api_client.model.pytorch_framework import PytorchFramework -from launch.api_client.model.tensorflow_framework import TensorflowFramework diff --git a/launch/api_client/models/__init__.py b/launch/api_client/models/__init__.py deleted file mode 100644 index 
5a993880..00000000 --- a/launch/api_client/models/__init__.py +++ /dev/null @@ -1,512 +0,0 @@ -# coding: utf-8 - -# flake8: noqa - -# import all models into this package -# if you have many models here with many references from one model to another this may -# raise a RecursionError -# to avoid this, import only the models that you directly need like: -# from launch.api_client.model.pet import Pet -# or import this package, but before doing it, use: -# import sys -# sys.setrecursionlimit(n) - -from launch.api_client.model.annotation import Annotation -from launch.api_client.model.audio import Audio -from launch.api_client.model.audio1 import Audio1 -from launch.api_client.model.audio2 import Audio2 -from launch.api_client.model.batch_completions_job import BatchCompletionsJob -from launch.api_client.model.batch_completions_job_status import ( - BatchCompletionsJobStatus, -) -from launch.api_client.model.batch_completions_model_config import ( - BatchCompletionsModelConfig, -) -from launch.api_client.model.batch_job_serialization_format import ( - BatchJobSerializationFormat, -) -from launch.api_client.model.batch_job_status import BatchJobStatus -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.callback_basic_auth import CallbackBasicAuth -from launch.api_client.model.callbackm_tls_auth import CallbackmTLSAuth -from launch.api_client.model.cancel_batch_completions_v2_response import ( - CancelBatchCompletionsV2Response, -) -from launch.api_client.model.cancel_fine_tune_response import ( - CancelFineTuneResponse, -) -from launch.api_client.model.chat_completion_function_call_option import ( - ChatCompletionFunctionCallOption, -) -from launch.api_client.model.chat_completion_functions import ( - ChatCompletionFunctions, -) -from launch.api_client.model.chat_completion_message_tool_call import ( - ChatCompletionMessageToolCall, -) -from launch.api_client.model.chat_completion_message_tool_call_chunk import ( - 
ChatCompletionMessageToolCallChunk, -) -from launch.api_client.model.chat_completion_message_tool_calls_input import ( - ChatCompletionMessageToolCallsInput, -) -from launch.api_client.model.chat_completion_message_tool_calls_output import ( - ChatCompletionMessageToolCallsOutput, -) -from launch.api_client.model.chat_completion_named_tool_choice import ( - ChatCompletionNamedToolChoice, -) -from launch.api_client.model.chat_completion_request_assistant_message import ( - ChatCompletionRequestAssistantMessage, -) -from launch.api_client.model.chat_completion_request_assistant_message_content_part import ( - ChatCompletionRequestAssistantMessageContentPart, -) -from launch.api_client.model.chat_completion_request_developer_message import ( - ChatCompletionRequestDeveloperMessage, -) -from launch.api_client.model.chat_completion_request_function_message import ( - ChatCompletionRequestFunctionMessage, -) -from launch.api_client.model.chat_completion_request_message import ( - ChatCompletionRequestMessage, -) -from launch.api_client.model.chat_completion_request_message_content_part_audio import ( - ChatCompletionRequestMessageContentPartAudio, -) -from launch.api_client.model.chat_completion_request_message_content_part_file import ( - ChatCompletionRequestMessageContentPartFile, -) -from launch.api_client.model.chat_completion_request_message_content_part_image import ( - ChatCompletionRequestMessageContentPartImage, -) -from launch.api_client.model.chat_completion_request_message_content_part_refusal import ( - ChatCompletionRequestMessageContentPartRefusal, -) -from launch.api_client.model.chat_completion_request_message_content_part_text import ( - ChatCompletionRequestMessageContentPartText, -) -from launch.api_client.model.chat_completion_request_system_message import ( - ChatCompletionRequestSystemMessage, -) -from launch.api_client.model.chat_completion_request_system_message_content_part import ( - ChatCompletionRequestSystemMessageContentPart, -) -from 
launch.api_client.model.chat_completion_request_tool_message import ( - ChatCompletionRequestToolMessage, -) -from launch.api_client.model.chat_completion_request_tool_message_content_part import ( - ChatCompletionRequestToolMessageContentPart, -) -from launch.api_client.model.chat_completion_request_user_message import ( - ChatCompletionRequestUserMessage, -) -from launch.api_client.model.chat_completion_request_user_message_content_part import ( - ChatCompletionRequestUserMessageContentPart, -) -from launch.api_client.model.chat_completion_response_message import ( - ChatCompletionResponseMessage, -) -from launch.api_client.model.chat_completion_stream_options import ( - ChatCompletionStreamOptions, -) -from launch.api_client.model.chat_completion_stream_response_delta import ( - ChatCompletionStreamResponseDelta, -) -from launch.api_client.model.chat_completion_token_logprob import ( - ChatCompletionTokenLogprob, -) -from launch.api_client.model.chat_completion_tool import ChatCompletionTool -from launch.api_client.model.chat_completion_tool_choice_option import ( - ChatCompletionToolChoiceOption, -) -from launch.api_client.model.chat_completion_v2_request import ( - ChatCompletionV2Request, -) -from launch.api_client.model.chat_completion_v2_stream_error_chunk import ( - ChatCompletionV2StreamErrorChunk, -) -from launch.api_client.model.choice import Choice -from launch.api_client.model.choice1 import Choice1 -from launch.api_client.model.choice2 import Choice2 -from launch.api_client.model.clone_model_bundle_v1_request import ( - CloneModelBundleV1Request, -) -from launch.api_client.model.clone_model_bundle_v2_request import ( - CloneModelBundleV2Request, -) -from launch.api_client.model.cloudpickle_artifact_flavor import ( - CloudpickleArtifactFlavor, -) -from launch.api_client.model.completion_output import CompletionOutput -from launch.api_client.model.completion_stream_output import ( - CompletionStreamOutput, -) -from 
launch.api_client.model.completion_stream_v1_request import ( - CompletionStreamV1Request, -) -from launch.api_client.model.completion_stream_v1_response import ( - CompletionStreamV1Response, -) -from launch.api_client.model.completion_sync_v1_request import ( - CompletionSyncV1Request, -) -from launch.api_client.model.completion_sync_v1_response import ( - CompletionSyncV1Response, -) -from launch.api_client.model.completion_tokens_details import ( - CompletionTokensDetails, -) -from launch.api_client.model.completion_usage import CompletionUsage -from launch.api_client.model.completion_v2_request import CompletionV2Request -from launch.api_client.model.completion_v2_stream_error_chunk import ( - CompletionV2StreamErrorChunk, -) -from launch.api_client.model.content import Content -from launch.api_client.model.content1 import Content1 -from launch.api_client.model.content2 import Content2 -from launch.api_client.model.content3 import Content3 -from launch.api_client.model.content4 import Content4 -from launch.api_client.model.content8 import Content8 -from launch.api_client.model.create_async_task_v1_response import ( - CreateAsyncTaskV1Response, -) -from launch.api_client.model.create_batch_completions_v1_model_config import ( - CreateBatchCompletionsV1ModelConfig, -) -from launch.api_client.model.create_batch_completions_v1_request import ( - CreateBatchCompletionsV1Request, -) -from launch.api_client.model.create_batch_completions_v1_request_content import ( - CreateBatchCompletionsV1RequestContent, -) -from launch.api_client.model.create_batch_completions_v1_response import ( - CreateBatchCompletionsV1Response, -) -from launch.api_client.model.create_batch_completions_v2_request import ( - CreateBatchCompletionsV2Request, -) -from launch.api_client.model.create_batch_job_resource_requests import ( - CreateBatchJobResourceRequests, -) -from launch.api_client.model.create_batch_job_v1_request import ( - CreateBatchJobV1Request, -) -from 
launch.api_client.model.create_batch_job_v1_response import ( - CreateBatchJobV1Response, -) -from launch.api_client.model.create_chat_completion_response import ( - CreateChatCompletionResponse, -) -from launch.api_client.model.create_chat_completion_stream_response import ( - CreateChatCompletionStreamResponse, -) -from launch.api_client.model.create_completion_response import ( - CreateCompletionResponse, -) -from launch.api_client.model.create_deep_speed_model_endpoint_request import ( - CreateDeepSpeedModelEndpointRequest, -) -from launch.api_client.model.create_docker_image_batch_job_bundle_v1_request import ( - CreateDockerImageBatchJobBundleV1Request, -) -from launch.api_client.model.create_docker_image_batch_job_bundle_v1_response import ( - CreateDockerImageBatchJobBundleV1Response, -) -from launch.api_client.model.create_docker_image_batch_job_resource_requests import ( - CreateDockerImageBatchJobResourceRequests, -) -from launch.api_client.model.create_docker_image_batch_job_v1_request import ( - CreateDockerImageBatchJobV1Request, -) -from launch.api_client.model.create_docker_image_batch_job_v1_response import ( - CreateDockerImageBatchJobV1Response, -) -from launch.api_client.model.create_fine_tune_request import ( - CreateFineTuneRequest, -) -from launch.api_client.model.create_fine_tune_response import ( - CreateFineTuneResponse, -) -from launch.api_client.model.create_light_llm_model_endpoint_request import ( - CreateLightLLMModelEndpointRequest, -) -from launch.api_client.model.create_llm_model_endpoint_v1_request import ( - CreateLLMModelEndpointV1Request, -) -from launch.api_client.model.create_llm_model_endpoint_v1_response import ( - CreateLLMModelEndpointV1Response, -) -from launch.api_client.model.create_model_bundle_v1_request import ( - CreateModelBundleV1Request, -) -from launch.api_client.model.create_model_bundle_v1_response import ( - CreateModelBundleV1Response, -) -from launch.api_client.model.create_model_bundle_v2_request import ( 
- CreateModelBundleV2Request, -) -from launch.api_client.model.create_model_bundle_v2_response import ( - CreateModelBundleV2Response, -) -from launch.api_client.model.create_model_endpoint_v1_request import ( - CreateModelEndpointV1Request, -) -from launch.api_client.model.create_model_endpoint_v1_response import ( - CreateModelEndpointV1Response, -) -from launch.api_client.model.create_sg_lang_model_endpoint_request import ( - CreateSGLangModelEndpointRequest, -) -from launch.api_client.model.create_tensor_rtllm_model_endpoint_request import ( - CreateTensorRTLLMModelEndpointRequest, -) -from launch.api_client.model.create_text_generation_inference_model_endpoint_request import ( - CreateTextGenerationInferenceModelEndpointRequest, -) -from launch.api_client.model.create_trigger_v1_request import ( - CreateTriggerV1Request, -) -from launch.api_client.model.create_trigger_v1_response import ( - CreateTriggerV1Response, -) -from launch.api_client.model.create_vllm_model_endpoint_request import ( - CreateVLLMModelEndpointRequest, -) -from launch.api_client.model.custom_framework import CustomFramework -from launch.api_client.model.delete_file_response import DeleteFileResponse -from launch.api_client.model.delete_llm_endpoint_response import ( - DeleteLLMEndpointResponse, -) -from launch.api_client.model.delete_model_endpoint_v1_response import ( - DeleteModelEndpointV1Response, -) -from launch.api_client.model.delete_trigger_v1_response import ( - DeleteTriggerV1Response, -) -from launch.api_client.model.docker_image_batch_job import DockerImageBatchJob -from launch.api_client.model.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) -from launch.api_client.model.endpoint_predict_v1_request import ( - EndpointPredictV1Request, -) -from launch.api_client.model.file import File -from launch.api_client.model.filtered_chat_completion_v2_request import ( - FilteredChatCompletionV2Request, -) -from 
launch.api_client.model.filtered_completion_v2_request import ( - FilteredCompletionV2Request, -) -from launch.api_client.model.function1 import Function1 -from launch.api_client.model.function2 import Function2 -from launch.api_client.model.function3 import Function3 -from launch.api_client.model.function_call import FunctionCall -from launch.api_client.model.function_call2 import FunctionCall2 -from launch.api_client.model.function_object import FunctionObject -from launch.api_client.model.function_parameters import FunctionParameters -from launch.api_client.model.get_async_task_v1_response import ( - GetAsyncTaskV1Response, -) -from launch.api_client.model.get_batch_completion_v2_response import ( - GetBatchCompletionV2Response, -) -from launch.api_client.model.get_batch_job_v1_response import ( - GetBatchJobV1Response, -) -from launch.api_client.model.get_docker_image_batch_job_v1_response import ( - GetDockerImageBatchJobV1Response, -) -from launch.api_client.model.get_file_content_response import ( - GetFileContentResponse, -) -from launch.api_client.model.get_file_response import GetFileResponse -from launch.api_client.model.get_fine_tune_events_response import ( - GetFineTuneEventsResponse, -) -from launch.api_client.model.get_fine_tune_response import GetFineTuneResponse -from launch.api_client.model.get_llm_model_endpoint_v1_response import ( - GetLLMModelEndpointV1Response, -) -from launch.api_client.model.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) -from launch.api_client.model.get_trigger_v1_response import ( - GetTriggerV1Response, -) -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.image_url import ImageUrl -from launch.api_client.model.input_audio import InputAudio -from launch.api_client.model.json_schema import JsonSchema -from launch.api_client.model.list_docker_image_batch_job_bundle_v1_response import ( 
- ListDockerImageBatchJobBundleV1Response, -) -from launch.api_client.model.list_docker_image_batch_jobs_v1_response import ( - ListDockerImageBatchJobsV1Response, -) -from launch.api_client.model.list_files_response import ListFilesResponse -from launch.api_client.model.list_fine_tunes_response import ( - ListFineTunesResponse, -) -from launch.api_client.model.list_llm_model_endpoints_v1_response import ( - ListLLMModelEndpointsV1Response, -) -from launch.api_client.model.list_model_bundles_v1_response import ( - ListModelBundlesV1Response, -) -from launch.api_client.model.list_model_bundles_v2_response import ( - ListModelBundlesV2Response, -) -from launch.api_client.model.list_model_endpoints_v1_response import ( - ListModelEndpointsV1Response, -) -from launch.api_client.model.list_triggers_v1_response import ( - ListTriggersV1Response, -) -from launch.api_client.model.llm_fine_tune_event import LLMFineTuneEvent -from launch.api_client.model.llm_inference_framework import ( - LLMInferenceFramework, -) -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.logprobs import Logprobs -from launch.api_client.model.logprobs2 import Logprobs2 -from launch.api_client.model.metadata import Metadata -from launch.api_client.model.model_bundle_environment_params import ( - ModelBundleEnvironmentParams, -) -from launch.api_client.model.model_bundle_framework_type import ( - ModelBundleFrameworkType, -) -from launch.api_client.model.model_bundle_order_by import ModelBundleOrderBy -from launch.api_client.model.model_bundle_packaging_type import ( - ModelBundlePackagingType, -) -from launch.api_client.model.model_bundle_v1_response import ( - ModelBundleV1Response, -) -from launch.api_client.model.model_bundle_v2_response import ( - ModelBundleV2Response, -) -from launch.api_client.model.model_download_request import ModelDownloadRequest -from launch.api_client.model.model_download_response import ( - ModelDownloadResponse, -) -from 
launch.api_client.model.model_endpoint_deployment_state import ( - ModelEndpointDeploymentState, -) -from launch.api_client.model.model_endpoint_order_by import ( - ModelEndpointOrderBy, -) -from launch.api_client.model.model_endpoint_resource_state import ( - ModelEndpointResourceState, -) -from launch.api_client.model.model_endpoint_status import ModelEndpointStatus -from launch.api_client.model.model_endpoint_type import ModelEndpointType -from launch.api_client.model.parallel_tool_calls import ParallelToolCalls -from launch.api_client.model.prediction_content import PredictionContent -from launch.api_client.model.prompt import Prompt -from launch.api_client.model.prompt1 import Prompt1 -from launch.api_client.model.prompt1_item import Prompt1Item -from launch.api_client.model.prompt_tokens_details import PromptTokensDetails -from launch.api_client.model.pytorch_framework import PytorchFramework -from launch.api_client.model.quantization import Quantization -from launch.api_client.model.reasoning_effort import ReasoningEffort -from launch.api_client.model.request_schema import RequestSchema -from launch.api_client.model.response_format_json_object import ( - ResponseFormatJsonObject, -) -from launch.api_client.model.response_format_json_schema import ( - ResponseFormatJsonSchema, -) -from launch.api_client.model.response_format_json_schema_schema import ( - ResponseFormatJsonSchemaSchema, -) -from launch.api_client.model.response_format_text import ResponseFormatText -from launch.api_client.model.response_modalities import ResponseModalities -from launch.api_client.model.response_schema import ResponseSchema -from launch.api_client.model.restart_model_endpoint_v1_response import ( - RestartModelEndpointV1Response, -) -from launch.api_client.model.runnable_image_flavor import RunnableImageFlavor -from launch.api_client.model.service_tier import ServiceTier -from launch.api_client.model.stop_configuration import StopConfiguration -from 
launch.api_client.model.stop_configuration1 import StopConfiguration1 -from launch.api_client.model.stream_error import StreamError -from launch.api_client.model.stream_error_content import StreamErrorContent -from launch.api_client.model.streaming_enhanced_runnable_image_flavor import ( - StreamingEnhancedRunnableImageFlavor, -) -from launch.api_client.model.sync_endpoint_predict_v1_request import ( - SyncEndpointPredictV1Request, -) -from launch.api_client.model.sync_endpoint_predict_v1_response import ( - SyncEndpointPredictV1Response, -) -from launch.api_client.model.task_status import TaskStatus -from launch.api_client.model.tensorflow_framework import TensorflowFramework -from launch.api_client.model.token_output import TokenOutput -from launch.api_client.model.tool_config import ToolConfig -from launch.api_client.model.top_logprob import TopLogprob -from launch.api_client.model.triton_enhanced_runnable_image_flavor import ( - TritonEnhancedRunnableImageFlavor, -) -from launch.api_client.model.update_batch_completions_v2_request import ( - UpdateBatchCompletionsV2Request, -) -from launch.api_client.model.update_batch_completions_v2_response import ( - UpdateBatchCompletionsV2Response, -) -from launch.api_client.model.update_batch_job_v1_request import ( - UpdateBatchJobV1Request, -) -from launch.api_client.model.update_batch_job_v1_response import ( - UpdateBatchJobV1Response, -) -from launch.api_client.model.update_deep_speed_model_endpoint_request import ( - UpdateDeepSpeedModelEndpointRequest, -) -from launch.api_client.model.update_docker_image_batch_job_v1_request import ( - UpdateDockerImageBatchJobV1Request, -) -from launch.api_client.model.update_docker_image_batch_job_v1_response import ( - UpdateDockerImageBatchJobV1Response, -) -from launch.api_client.model.update_llm_model_endpoint_v1_request import ( - UpdateLLMModelEndpointV1Request, -) -from launch.api_client.model.update_llm_model_endpoint_v1_response import ( - 
UpdateLLMModelEndpointV1Response, -) -from launch.api_client.model.update_model_endpoint_v1_request import ( - UpdateModelEndpointV1Request, -) -from launch.api_client.model.update_model_endpoint_v1_response import ( - UpdateModelEndpointV1Response, -) -from launch.api_client.model.update_sg_lang_model_endpoint_request import ( - UpdateSGLangModelEndpointRequest, -) -from launch.api_client.model.update_text_generation_inference_model_endpoint_request import ( - UpdateTextGenerationInferenceModelEndpointRequest, -) -from launch.api_client.model.update_trigger_v1_request import ( - UpdateTriggerV1Request, -) -from launch.api_client.model.update_trigger_v1_response import ( - UpdateTriggerV1Response, -) -from launch.api_client.model.update_vllm_model_endpoint_request import ( - UpdateVLLMModelEndpointRequest, -) -from launch.api_client.model.upload_file_response import UploadFileResponse -from launch.api_client.model.url_citation import UrlCitation -from launch.api_client.model.user_location import UserLocation -from launch.api_client.model.validation_error import ValidationError -from launch.api_client.model.voice_ids_shared import VoiceIdsShared -from launch.api_client.model.web_search_context_size import ( - WebSearchContextSize, -) -from launch.api_client.model.web_search_location import WebSearchLocation -from launch.api_client.model.web_search_options import WebSearchOptions -from launch.api_client.model.zip_artifact_flavor import ZipArtifactFlavor diff --git a/launch/api_client/models/batch_job_serialization_format.py b/launch/api_client/models/batch_job_serialization_format.py deleted file mode 100644 index f63ceb0d..00000000 --- a/launch/api_client/models/batch_job_serialization_format.py +++ /dev/null @@ -1,37 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator 
(https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class BatchJobSerializationFormat(str, Enum): - """ - An enumeration. - """ - - """ - allowed enum values - """ - JSON = "JSON" - PICKLE = "PICKLE" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of BatchJobSerializationFormat from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/batch_job_status.py b/launch/api_client/models/batch_job_status.py deleted file mode 100644 index b5eaee7b..00000000 --- a/launch/api_client/models/batch_job_status.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class BatchJobStatus(str, Enum): - """ - An enumeration. 
- """ - - """ - allowed enum values - """ - PENDING = "PENDING" - RUNNING = "RUNNING" - SUCCESS = "SUCCESS" - FAILURE = "FAILURE" - CANCELLED = "CANCELLED" - UNDEFINED = "UNDEFINED" - TIMEOUT = "TIMEOUT" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of BatchJobStatus from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/body_upload_file_v1_files_post.py b/launch/api_client/models/body_upload_file_v1_files_post.py deleted file mode 100644 index 61bb3a86..00000000 --- a/launch/api_client/models/body_upload_file_v1_files_post.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import BaseModel, ConfigDict, StrictBytes, StrictStr -from typing_extensions import Self - - -class BodyUploadFileV1FilesPost(BaseModel): - """ - BodyUploadFileV1FilesPost - """ # noqa: E501 - - file: Union[StrictBytes, StrictStr] - __properties: ClassVar[List[str]] = ["file"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of 
BodyUploadFileV1FilesPost from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of BodyUploadFileV1FilesPost from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"file": obj.get("file")}) - return _obj diff --git a/launch/api_client/models/callback_auth.py b/launch/api_client/models/callback_auth.py deleted file mode 100644 index 8c758ae1..00000000 --- a/launch/api_client/models/callback_auth.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -from typing import Any, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing_extensions import Literal, Self - -from launch.api_client.models.callback_basic_auth import CallbackBasicAuth -from launch.api_client.models.callbackm_tls_auth import CallbackmTLSAuth - -CALLBACKAUTH_ONE_OF_SCHEMAS = ["CallbackBasicAuth", "CallbackmTLSAuth"] - - -class CallbackAuth(BaseModel): - """ - CallbackAuth - """ - - # data type: CallbackBasicAuth - oneof_schema_1_validator: Optional[CallbackBasicAuth] = None - # data type: CallbackmTLSAuth - oneof_schema_2_validator: Optional[CallbackmTLSAuth] = None - actual_instance: Optional[Union[CallbackBasicAuth, CallbackmTLSAuth]] = None - one_of_schemas: Set[str] = {"CallbackBasicAuth", "CallbackmTLSAuth"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - discriminator_value_class_map: Dict[str, str] = {} - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`") - if kwargs: - raise ValueError("If a position argument is used, keyword arguments cannot be used.") - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = CallbackAuth.model_construct() - error_messages = [] - match = 0 - # validate data type: CallbackBasicAuth - if not isinstance(v, CallbackBasicAuth): - error_messages.append(f"Error! Input type `{type(v)}` is not `CallbackBasicAuth`") - else: - match += 1 - # validate data type: CallbackmTLSAuth - if not isinstance(v, CallbackmTLSAuth): - error_messages.append(f"Error! 
Input type `{type(v)}` is not `CallbackmTLSAuth`") - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in CallbackAuth with oneOf schemas: CallbackBasicAuth, CallbackmTLSAuth. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in CallbackAuth with oneOf schemas: CallbackBasicAuth, CallbackmTLSAuth. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into CallbackBasicAuth - try: - instance.actual_instance = CallbackBasicAuth.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into CallbackmTLSAuth - try: - instance.actual_instance = CallbackmTLSAuth.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into CallbackAuth with oneOf schemas: CallbackBasicAuth, CallbackmTLSAuth. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into CallbackAuth with oneOf schemas: CallbackBasicAuth, CallbackmTLSAuth. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], CallbackBasicAuth, CallbackmTLSAuth]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/launch/api_client/models/callback_basic_auth.py b/launch/api_client/models/callback_basic_auth.py deleted file mode 100644 index 8a43dc4d..00000000 --- a/launch/api_client/models/callback_basic_auth.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing_extensions import Self - - -class CallbackBasicAuth(BaseModel): - """ - CallbackBasicAuth - """ # noqa: E501 - - kind: StrictStr - password: StrictStr - username: StrictStr - __properties: ClassVar[List[str]] = ["kind", "password", "username"] - - @field_validator("kind") - def kind_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["basic"]): - raise ValueError("must be one of enum values ('basic')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CallbackBasicAuth from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CallbackBasicAuth from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"kind": obj.get("kind"), "password": obj.get("password"), "username": obj.get("username")} - ) - return _obj diff --git a/launch/api_client/models/callbackm_tls_auth.py b/launch/api_client/models/callbackm_tls_auth.py deleted file mode 100644 index 3ab9f5b9..00000000 --- a/launch/api_client/models/callbackm_tls_auth.py +++ /dev/null @@ -1,92 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing_extensions import Self - - -class CallbackmTLSAuth(BaseModel): - """ - CallbackmTLSAuth - """ # noqa: E501 - - cert: StrictStr - key: StrictStr - kind: StrictStr - __properties: ClassVar[List[str]] = ["cert", "key", "kind"] - - @field_validator("kind") - def kind_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["mtls"]): - raise ValueError("must be one of enum values ('mtls')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CallbackmTLSAuth from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CallbackmTLSAuth from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"cert": obj.get("cert"), "key": obj.get("key"), "kind": obj.get("kind")}) - return _obj diff --git a/launch/api_client/models/cancel_fine_tune_response.py b/launch/api_client/models/cancel_fine_tune_response.py deleted file mode 100644 index ea656097..00000000 --- a/launch/api_client/models/cancel_fine_tune_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool -from typing_extensions import Self - - -class CancelFineTuneResponse(BaseModel): - """ - CancelFineTuneResponse - """ # noqa: E501 - - success: StrictBool - __properties: ClassVar[List[str]] = ["success"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CancelFineTuneResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CancelFineTuneResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"success": obj.get("success")}) - return _obj diff --git a/launch/api_client/models/clone_model_bundle_v1_request.py b/launch/api_client/models/clone_model_bundle_v1_request.py deleted file mode 100644 index c63db806..00000000 --- a/launch/api_client/models/clone_model_bundle_v1_request.py +++ /dev/null @@ -1,89 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CloneModelBundleV1Request(BaseModel): - """ - Request object for cloning a Model Bundle from another one. 
- """ # noqa: E501 - - new_app_config: Optional[Dict[str, Any]] = None - original_model_bundle_id: StrictStr - __properties: ClassVar[List[str]] = ["new_app_config", "original_model_bundle_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CloneModelBundleV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CloneModelBundleV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "new_app_config": obj.get("new_app_config"), - "original_model_bundle_id": obj.get("original_model_bundle_id"), - } - ) - return _obj diff --git a/launch/api_client/models/clone_model_bundle_v2_request.py b/launch/api_client/models/clone_model_bundle_v2_request.py deleted file mode 100644 index d9325b55..00000000 --- a/launch/api_client/models/clone_model_bundle_v2_request.py +++ /dev/null @@ -1,89 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CloneModelBundleV2Request(BaseModel): - """ - Request object for cloning a Model Bundle from another one. 
- """ # noqa: E501 - - new_app_config: Optional[Dict[str, Any]] = None - original_model_bundle_id: StrictStr - __properties: ClassVar[List[str]] = ["new_app_config", "original_model_bundle_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CloneModelBundleV2Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CloneModelBundleV2Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "new_app_config": obj.get("new_app_config"), - "original_model_bundle_id": obj.get("original_model_bundle_id"), - } - ) - return _obj diff --git a/launch/api_client/models/cloudpickle_artifact_flavor.py b/launch/api_client/models/cloudpickle_artifact_flavor.py deleted file mode 100644 index 06014f7a..00000000 --- a/launch/api_client/models/cloudpickle_artifact_flavor.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing_extensions import Self - -from launch.api_client.models.framework import Framework - - -class CloudpickleArtifactFlavor(BaseModel): - """ - This is the entity-layer class for the Model Bundle flavor of a cloudpickle artifact. 
- """ # noqa: E501 - - app_config: Optional[Dict[str, Any]] = None - flavor: StrictStr - framework: Framework - load_model_fn: StrictStr - load_predict_fn: StrictStr - location: StrictStr - requirements: List[StrictStr] - __properties: ClassVar[List[str]] = [ - "app_config", - "flavor", - "framework", - "load_model_fn", - "load_predict_fn", - "location", - "requirements", - ] - - @field_validator("flavor") - def flavor_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["cloudpickle_artifact"]): - raise ValueError("must be one of enum values ('cloudpickle_artifact')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CloudpickleArtifactFlavor from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of framework - if self.framework: - _dict["framework"] = self.framework.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CloudpickleArtifactFlavor from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "app_config": obj.get("app_config"), - "flavor": obj.get("flavor"), - "framework": Framework.from_dict(obj["framework"]) if obj.get("framework") is not None else None, - "load_model_fn": obj.get("load_model_fn"), - "load_predict_fn": obj.get("load_predict_fn"), - "location": obj.get("location"), - "requirements": obj.get("requirements"), - } - ) - return _obj diff --git a/launch/api_client/models/completion_output.py b/launch/api_client/models/completion_output.py deleted file mode 100644 index ba30fb59..00000000 --- a/launch/api_client/models/completion_output.py +++ /dev/null @@ -1,104 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing_extensions import Self - -from launch.api_client.models.token_output import TokenOutput - - -class CompletionOutput(BaseModel): - """ - CompletionOutput - """ # noqa: E501 - - num_completion_tokens: StrictInt - num_prompt_tokens: StrictInt - text: StrictStr - tokens: Optional[List[TokenOutput]] = None - __properties: ClassVar[List[str]] = ["num_completion_tokens", "num_prompt_tokens", "text", "tokens"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionOutput from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in tokens (list) - _items = [] - if self.tokens: - for _item in self.tokens: - if _item: - _items.append(_item.to_dict()) - _dict["tokens"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionOutput from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "num_completion_tokens": obj.get("num_completion_tokens"), - "num_prompt_tokens": obj.get("num_prompt_tokens"), - "text": obj.get("text"), - "tokens": [TokenOutput.from_dict(_item) for _item in obj["tokens"]] - if obj.get("tokens") is not None - else None, - } - ) - return _obj diff --git a/launch/api_client/models/completion_stream_output.py b/launch/api_client/models/completion_stream_output.py deleted file mode 100644 index 2589888d..00000000 --- a/launch/api_client/models/completion_stream_output.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool, StrictInt, StrictStr -from typing_extensions import Self - -from launch.api_client.models.token_output import TokenOutput - - -class CompletionStreamOutput(BaseModel): - """ - CompletionStreamOutput - """ # noqa: E501 - - finished: StrictBool - num_completion_tokens: Optional[StrictInt] = None - num_prompt_tokens: Optional[StrictInt] = None - text: StrictStr - token: Optional[TokenOutput] = None - __properties: ClassVar[List[str]] = ["finished", "num_completion_tokens", "num_prompt_tokens", "text", "token"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionStreamOutput from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of token - if self.token: - _dict["token"] = self.token.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionStreamOutput from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "finished": obj.get("finished"), - "num_completion_tokens": obj.get("num_completion_tokens"), - "num_prompt_tokens": obj.get("num_prompt_tokens"), - "text": obj.get("text"), - "token": TokenOutput.from_dict(obj["token"]) if obj.get("token") is not None else None, - } - ) - return _obj diff --git a/launch/api_client/models/completion_stream_v1_request.py b/launch/api_client/models/completion_stream_v1_request.py deleted file mode 100644 index 345d0869..00000000 --- a/launch/api_client/models/completion_stream_v1_request.py +++ /dev/null @@ -1,145 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictInt, - StrictStr, -) -from typing_extensions import Annotated, Self - - -class CompletionStreamV1Request(BaseModel): - """ - Request object for a stream prompt completion task. 
- """ # noqa: E501 - - frequency_penalty: Optional[ - Union[Annotated[float, Field(le=2.0, strict=True, ge=0.0)], Annotated[int, Field(le=2, strict=True, ge=0)]] - ] = None - guided_choice: Optional[List[StrictStr]] = None - guided_grammar: Optional[StrictStr] = None - guided_json: Optional[Dict[str, Any]] = None - guided_regex: Optional[StrictStr] = None - include_stop_str_in_output: Optional[StrictBool] = None - max_new_tokens: StrictInt - presence_penalty: Optional[ - Union[Annotated[float, Field(le=2.0, strict=True, ge=0.0)], Annotated[int, Field(le=2, strict=True, ge=0)]] - ] = None - prompt: StrictStr - return_token_log_probs: Optional[StrictBool] = False - stop_sequences: Optional[List[StrictStr]] = None - temperature: Union[ - Annotated[float, Field(le=1.0, strict=True, ge=0.0)], Annotated[int, Field(le=1, strict=True, ge=0)] - ] - top_k: Optional[Annotated[int, Field(strict=True, ge=-1)]] = None - top_p: Optional[ - Union[Annotated[float, Field(le=1.0, strict=True)], Annotated[int, Field(le=1, strict=True)]] - ] = None - __properties: ClassVar[List[str]] = [ - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "max_new_tokens", - "presence_penalty", - "prompt", - "return_token_log_probs", - "stop_sequences", - "temperature", - "top_k", - "top_p", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionStreamV1Request from a JSON string""" - return 
cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionStreamV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "frequency_penalty": obj.get("frequency_penalty"), - "guided_choice": obj.get("guided_choice"), - "guided_grammar": obj.get("guided_grammar"), - "guided_json": obj.get("guided_json"), - "guided_regex": obj.get("guided_regex"), - "include_stop_str_in_output": obj.get("include_stop_str_in_output"), - "max_new_tokens": obj.get("max_new_tokens"), - "presence_penalty": obj.get("presence_penalty"), - "prompt": obj.get("prompt"), - "return_token_log_probs": obj.get("return_token_log_probs") - if obj.get("return_token_log_probs") is not None - else False, - "stop_sequences": obj.get("stop_sequences"), - "temperature": obj.get("temperature"), - "top_k": obj.get("top_k"), - "top_p": obj.get("top_p"), - } - ) - return _obj diff --git a/launch/api_client/models/completion_stream_v1_response.py b/launch/api_client/models/completion_stream_v1_response.py deleted file mode 100644 index a87666f4..00000000 --- a/launch/api_client/models/completion_stream_v1_response.py +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The 
version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.completion_stream_output import ( - CompletionStreamOutput, -) -from launch.api_client.models.stream_error import StreamError - - -class CompletionStreamV1Response(BaseModel): - """ - Response object for a stream prompt completion task. - """ # noqa: E501 - - error: Optional[StreamError] = None - output: Optional[CompletionStreamOutput] = None - request_id: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["error", "output", "request_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionStreamV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of error - if self.error: - _dict["error"] = self.error.to_dict() - # override the default output from pydantic by calling `to_dict()` of output - if self.output: - _dict["output"] = self.output.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionStreamV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "error": StreamError.from_dict(obj["error"]) if obj.get("error") is not None else None, - "output": CompletionStreamOutput.from_dict(obj["output"]) if obj.get("output") is not None else None, - "request_id": obj.get("request_id"), - } - ) - return _obj diff --git a/launch/api_client/models/completion_sync_v1_request.py b/launch/api_client/models/completion_sync_v1_request.py deleted file mode 100644 index a3e240d1..00000000 --- a/launch/api_client/models/completion_sync_v1_request.py +++ /dev/null @@ -1,145 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictInt, - StrictStr, -) -from typing_extensions import Annotated, Self - - -class CompletionSyncV1Request(BaseModel): - """ - Request object for a synchronous prompt completion task. 
- """ # noqa: E501 - - frequency_penalty: Optional[ - Union[Annotated[float, Field(le=2.0, strict=True, ge=0.0)], Annotated[int, Field(le=2, strict=True, ge=0)]] - ] = None - guided_choice: Optional[List[StrictStr]] = None - guided_grammar: Optional[StrictStr] = None - guided_json: Optional[Dict[str, Any]] = None - guided_regex: Optional[StrictStr] = None - include_stop_str_in_output: Optional[StrictBool] = None - max_new_tokens: StrictInt - presence_penalty: Optional[ - Union[Annotated[float, Field(le=2.0, strict=True, ge=0.0)], Annotated[int, Field(le=2, strict=True, ge=0)]] - ] = None - prompt: StrictStr - return_token_log_probs: Optional[StrictBool] = False - stop_sequences: Optional[List[StrictStr]] = None - temperature: Union[ - Annotated[float, Field(le=1.0, strict=True, ge=0.0)], Annotated[int, Field(le=1, strict=True, ge=0)] - ] - top_k: Optional[Annotated[int, Field(strict=True, ge=-1)]] = None - top_p: Optional[ - Union[Annotated[float, Field(le=1.0, strict=True)], Annotated[int, Field(le=1, strict=True)]] - ] = None - __properties: ClassVar[List[str]] = [ - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "max_new_tokens", - "presence_penalty", - "prompt", - "return_token_log_probs", - "stop_sequences", - "temperature", - "top_k", - "top_p", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionSyncV1Request from a JSON string""" - return 
cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionSyncV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "frequency_penalty": obj.get("frequency_penalty"), - "guided_choice": obj.get("guided_choice"), - "guided_grammar": obj.get("guided_grammar"), - "guided_json": obj.get("guided_json"), - "guided_regex": obj.get("guided_regex"), - "include_stop_str_in_output": obj.get("include_stop_str_in_output"), - "max_new_tokens": obj.get("max_new_tokens"), - "presence_penalty": obj.get("presence_penalty"), - "prompt": obj.get("prompt"), - "return_token_log_probs": obj.get("return_token_log_probs") - if obj.get("return_token_log_probs") is not None - else False, - "stop_sequences": obj.get("stop_sequences"), - "temperature": obj.get("temperature"), - "top_k": obj.get("top_k"), - "top_p": obj.get("top_p"), - } - ) - return _obj diff --git a/launch/api_client/models/completion_sync_v1_response.py b/launch/api_client/models/completion_sync_v1_response.py deleted file mode 100644 index 877ac23c..00000000 --- a/launch/api_client/models/completion_sync_v1_response.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of 
the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.completion_output import CompletionOutput - - -class CompletionSyncV1Response(BaseModel): - """ - Response object for a synchronous prompt completion task. - """ # noqa: E501 - - output: Optional[CompletionOutput] = None - request_id: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["output", "request_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionSyncV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of output - if self.output: - _dict["output"] = self.output.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionSyncV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "output": CompletionOutput.from_dict(obj["output"]) if obj.get("output") is not None else None, - "request_id": obj.get("request_id"), - } - ) - return _obj diff --git a/launch/api_client/models/cpus.py b/launch/api_client/models/cpus.py deleted file mode 100644 index 6606c81e..00000000 --- a/launch/api_client/models/cpus.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from inspect import getfullargspec -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictFloat, - StrictInt, - StrictStr, - ValidationError, - field_validator, -) -from typing_extensions import Literal, Self - -CPUS_ANY_OF_SCHEMAS = ["float", "int", "str"] - - -class Cpus(BaseModel): - """ - Cpus - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: int - anyof_schema_2_validator: Optional[StrictInt] = None - # data type: float - anyof_schema_3_validator: Optional[Union[StrictFloat, StrictInt]] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[float, int, str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"float", "int", "str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`") - if kwargs: - raise ValueError("If a position argument is used, keyword arguments cannot be used.") - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = Cpus.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: int - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: float - try: - instance.anyof_schema_3_validator = v - return v - except (ValidationError, ValueError) as e: - 
error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in Cpus with anyOf schemas: float, int, str. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into int - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into float - try: - # validation - instance.anyof_schema_3_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_3_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into Cpus with anyOf schemas: float, int, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], float, int, str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/launch/api_client/models/create_async_task_v1_response.py b/launch/api_client/models/create_async_task_v1_response.py deleted file mode 100644 index 97c100db..00000000 --- a/launch/api_client/models/create_async_task_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateAsyncTaskV1Response(BaseModel): - """ - CreateAsyncTaskV1Response - """ # noqa: E501 - - task_id: StrictStr - __properties: ClassVar[List[str]] = ["task_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateAsyncTaskV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateAsyncTaskV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"task_id": obj.get("task_id")}) - return _obj diff --git a/launch/api_client/models/create_batch_completions_model_config.py b/launch/api_client/models/create_batch_completions_model_config.py deleted file mode 100644 index faa0509f..00000000 --- a/launch/api_client/models/create_batch_completions_model_config.py +++ /dev/null @@ -1,99 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing_extensions import Self - -from launch.api_client.models.quantization import Quantization - - -class CreateBatchCompletionsModelConfig(BaseModel): - """ - CreateBatchCompletionsModelConfig - """ # noqa: E501 - - checkpoint_path: Optional[StrictStr] = None - labels: Dict[str, StrictStr] - model: StrictStr - num_shards: Optional[StrictInt] = 1 - quantize: Optional[Quantization] = None - seed: Optional[StrictInt] = None - __properties: ClassVar[List[str]] = ["checkpoint_path", "labels", "model", "num_shards", "quantize", "seed"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateBatchCompletionsModelConfig from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateBatchCompletionsModelConfig from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "checkpoint_path": obj.get("checkpoint_path"), - "labels": obj.get("labels"), - "model": obj.get("model"), - "num_shards": obj.get("num_shards") if obj.get("num_shards") is not None else 1, - "quantize": obj.get("quantize"), - "seed": obj.get("seed"), - } - ) - return _obj diff --git a/launch/api_client/models/create_batch_completions_request.py b/launch/api_client/models/create_batch_completions_request.py deleted file mode 100644 index b10245a9..00000000 --- a/launch/api_client/models/create_batch_completions_request.py +++ /dev/null @@ -1,128 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing_extensions import Annotated, Self - -from launch.api_client.models.create_batch_completions_model_config import ( - CreateBatchCompletionsModelConfig, -) -from launch.api_client.models.create_batch_completions_request_content import ( - CreateBatchCompletionsRequestContent, -) -from launch.api_client.models.tool_config import ToolConfig - - -class CreateBatchCompletionsRequest(BaseModel): - """ - Request object for batch completions. 
- """ # noqa: E501 - - content: Optional[CreateBatchCompletionsRequestContent] = None - data_parallelism: Optional[Annotated[int, Field(le=64, strict=True, ge=1)]] = 1 - input_data_path: Optional[StrictStr] = None - max_runtime_sec: Optional[Annotated[int, Field(le=172800, strict=True, ge=1)]] = 86400 - model_config: CreateBatchCompletionsModelConfig - output_data_path: StrictStr - tool_config: Optional[ToolConfig] = None - __properties: ClassVar[List[str]] = [ - "content", - "data_parallelism", - "input_data_path", - "max_runtime_sec", - "model_config", - "output_data_path", - "tool_config", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateBatchCompletionsRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of content - if self.content: - _dict["content"] = self.content.to_dict() - # override the default output from pydantic by calling `to_dict()` of model_config - if self.model_config: - _dict["model_config"] = self.model_config.to_dict() - # override the default output from pydantic by calling `to_dict()` of tool_config - if self.tool_config: - _dict["tool_config"] = self.tool_config.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateBatchCompletionsRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "content": CreateBatchCompletionsRequestContent.from_dict(obj["content"]) - if obj.get("content") is not None - else None, - "data_parallelism": obj.get("data_parallelism") if obj.get("data_parallelism") is not None else 1, - "input_data_path": obj.get("input_data_path"), - "max_runtime_sec": obj.get("max_runtime_sec") if obj.get("max_runtime_sec") is not None else 86400, - "model_config": CreateBatchCompletionsModelConfig.from_dict(obj["model_config"]) - if obj.get("model_config") is not None - else None, - "output_data_path": obj.get("output_data_path"), - "tool_config": ToolConfig.from_dict(obj["tool_config"]) if obj.get("tool_config") is not None else None, - } - ) - return _obj diff --git a/launch/api_client/models/create_batch_completions_request_content.py b/launch/api_client/models/create_batch_completions_request_content.py deleted file mode 100644 index e203a52e..00000000 --- a/launch/api_client/models/create_batch_completions_request_content.py +++ /dev/null @@ -1,130 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by 
Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictInt, - StrictStr, -) -from typing_extensions import Annotated, Self - - -class CreateBatchCompletionsRequestContent(BaseModel): - """ - CreateBatchCompletionsRequestContent - """ # noqa: E501 - - frequency_penalty: Optional[ - Union[Annotated[float, Field(le=2.0, strict=True, ge=0.0)], Annotated[int, Field(le=2, strict=True, ge=0)]] - ] = None - max_new_tokens: StrictInt - presence_penalty: Optional[ - Union[Annotated[float, Field(le=2.0, strict=True, ge=0.0)], Annotated[int, Field(le=2, strict=True, ge=0)]] - ] = None - prompts: List[StrictStr] - return_token_log_probs: Optional[StrictBool] = False - stop_sequences: Optional[List[StrictStr]] = None - temperature: Union[ - Annotated[float, Field(le=1.0, strict=True, ge=0.0)], Annotated[int, Field(le=1, strict=True, ge=0)] - ] - top_k: Optional[Annotated[int, Field(strict=True, ge=-1)]] = None - top_p: Optional[ - Union[Annotated[float, Field(le=1.0, strict=True)], Annotated[int, Field(le=1, strict=True)]] - ] = None - __properties: ClassVar[List[str]] = [ - "frequency_penalty", - "max_new_tokens", - "presence_penalty", - "prompts", - "return_token_log_probs", - "stop_sequences", - "temperature", - "top_k", - "top_p", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON 
representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateBatchCompletionsRequestContent from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateBatchCompletionsRequestContent from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "frequency_penalty": obj.get("frequency_penalty"), - "max_new_tokens": obj.get("max_new_tokens"), - "presence_penalty": obj.get("presence_penalty"), - "prompts": obj.get("prompts"), - "return_token_log_probs": obj.get("return_token_log_probs") - if obj.get("return_token_log_probs") is not None - else False, - "stop_sequences": obj.get("stop_sequences"), - "temperature": obj.get("temperature"), - "top_k": obj.get("top_k"), - "top_p": obj.get("top_p"), - } - ) - return _obj diff --git a/launch/api_client/models/create_batch_completions_response.py b/launch/api_client/models/create_batch_completions_response.py deleted file mode 100644 index 9d52b148..00000000 --- a/launch/api_client/models/create_batch_completions_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No 
description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateBatchCompletionsResponse(BaseModel): - """ - CreateBatchCompletionsResponse - """ # noqa: E501 - - job_id: StrictStr - __properties: ClassVar[List[str]] = ["job_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateBatchCompletionsResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateBatchCompletionsResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"job_id": obj.get("job_id")}) - return _obj diff --git a/launch/api_client/models/create_batch_job_resource_requests.py b/launch/api_client/models/create_batch_job_resource_requests.py deleted file mode 100644 index 27da5ee5..00000000 --- a/launch/api_client/models/create_batch_job_resource_requests.py +++ /dev/null @@ -1,113 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictInt -from typing_extensions import Self - -from launch.api_client.models.cpus import Cpus -from launch.api_client.models.gpu_type import GpuType -from launch.api_client.models.memory import Memory -from launch.api_client.models.storage import Storage - - -class CreateBatchJobResourceRequests(BaseModel): - """ - CreateBatchJobResourceRequests - """ # noqa: E501 - - cpus: Optional[Cpus] = None - gpu_type: Optional[GpuType] = None - gpus: Optional[StrictInt] = None - max_workers: Optional[StrictInt] = None - memory: Optional[Memory] = None - per_worker: Optional[StrictInt] = None - storage: Optional[Storage] = None - __properties: ClassVar[List[str]] = ["cpus", "gpu_type", "gpus", "max_workers", "memory", "per_worker", "storage"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateBatchJobResourceRequests from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. 
Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of cpus - if self.cpus: - _dict["cpus"] = self.cpus.to_dict() - # override the default output from pydantic by calling `to_dict()` of memory - if self.memory: - _dict["memory"] = self.memory.to_dict() - # override the default output from pydantic by calling `to_dict()` of storage - if self.storage: - _dict["storage"] = self.storage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateBatchJobResourceRequests from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "cpus": Cpus.from_dict(obj["cpus"]) if obj.get("cpus") is not None else None, - "gpu_type": obj.get("gpu_type"), - "gpus": obj.get("gpus"), - "max_workers": obj.get("max_workers"), - "memory": Memory.from_dict(obj["memory"]) if obj.get("memory") is not None else None, - "per_worker": obj.get("per_worker"), - "storage": Storage.from_dict(obj["storage"]) if obj.get("storage") is not None else None, - } - ) - return _obj diff --git a/launch/api_client/models/create_batch_job_v1_request.py b/launch/api_client/models/create_batch_job_v1_request.py deleted file mode 100644 index cca3b22d..00000000 --- a/launch/api_client/models/create_batch_job_v1_request.py +++ /dev/null @@ -1,116 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr -from typing_extensions import Self - -from launch.api_client.models.batch_job_serialization_format import ( - BatchJobSerializationFormat, -) -from launch.api_client.models.create_batch_job_resource_requests import ( - CreateBatchJobResourceRequests, -) - - -class CreateBatchJobV1Request(BaseModel): - """ - CreateBatchJobV1Request - """ # noqa: E501 - - input_path: StrictStr - labels: Dict[str, StrictStr] - model_bundle_id: StrictStr - resource_requests: CreateBatchJobResourceRequests - serialization_format: BatchJobSerializationFormat - timeout_seconds: Optional[Union[StrictFloat, StrictInt]] = 43200.0 - __properties: ClassVar[List[str]] = [ - "input_path", - "labels", - "model_bundle_id", - "resource_requests", - "serialization_format", - "timeout_seconds", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateBatchJobV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of resource_requests - if self.resource_requests: - _dict["resource_requests"] = self.resource_requests.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateBatchJobV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "input_path": obj.get("input_path"), - "labels": obj.get("labels"), - "model_bundle_id": obj.get("model_bundle_id"), - "resource_requests": CreateBatchJobResourceRequests.from_dict(obj["resource_requests"]) - if obj.get("resource_requests") is not None - else None, - "serialization_format": obj.get("serialization_format"), - "timeout_seconds": obj.get("timeout_seconds") if obj.get("timeout_seconds") is not None else 43200.0, - } - ) - return _obj diff --git a/launch/api_client/models/create_batch_job_v1_response.py b/launch/api_client/models/create_batch_job_v1_response.py deleted file mode 100644 index 6bd63bf8..00000000 --- a/launch/api_client/models/create_batch_job_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateBatchJobV1Response(BaseModel): - """ - CreateBatchJobV1Response - """ # noqa: E501 - - job_id: StrictStr - __properties: ClassVar[List[str]] = ["job_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateBatchJobV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateBatchJobV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"job_id": obj.get("job_id")}) - return _obj diff --git a/launch/api_client/models/create_docker_image_batch_job_bundle_v1_request.py b/launch/api_client/models/create_docker_image_batch_job_bundle_v1_request.py deleted file mode 100644 index ac951112..00000000 --- a/launch/api_client/models/create_docker_image_batch_job_bundle_v1_request.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr -from typing_extensions import Self - -from launch.api_client.models.create_docker_image_batch_job_resource_requests import ( - CreateDockerImageBatchJobResourceRequests, -) - - -class CreateDockerImageBatchJobBundleV1Request(BaseModel): - """ - CreateDockerImageBatchJobBundleV1Request - """ # noqa: E501 - - command: List[StrictStr] - env: Optional[Dict[str, StrictStr]] = None - image_repository: StrictStr - image_tag: StrictStr - mount_location: Optional[StrictStr] = None - name: StrictStr - public: Optional[StrictBool] = False - resource_requests: Optional[CreateDockerImageBatchJobResourceRequests] = None - __properties: ClassVar[List[str]] = [ - "command", - "env", - "image_repository", - "image_tag", - "mount_location", - "name", - "public", - "resource_requests", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobBundleV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of resource_requests - if self.resource_requests: - _dict["resource_requests"] = self.resource_requests.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobBundleV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "command": obj.get("command"), - "env": obj.get("env"), - "image_repository": obj.get("image_repository"), - "image_tag": obj.get("image_tag"), - "mount_location": obj.get("mount_location"), - "name": obj.get("name"), - "public": obj.get("public") if obj.get("public") is not None else False, - "resource_requests": CreateDockerImageBatchJobResourceRequests.from_dict(obj["resource_requests"]) - if obj.get("resource_requests") is not None - else None, - } - ) - return _obj diff --git a/launch/api_client/models/create_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/models/create_docker_image_batch_job_bundle_v1_response.py deleted file mode 100644 index d5b5a36c..00000000 --- a/launch/api_client/models/create_docker_image_batch_job_bundle_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateDockerImageBatchJobBundleV1Response(BaseModel): - """ - CreateDockerImageBatchJobBundleV1Response - """ # noqa: E501 - - docker_image_batch_job_bundle_id: StrictStr - __properties: ClassVar[List[str]] = ["docker_image_batch_job_bundle_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobBundleV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobBundleV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"docker_image_batch_job_bundle_id": obj.get("docker_image_batch_job_bundle_id")}) - return _obj diff --git a/launch/api_client/models/create_docker_image_batch_job_resource_requests.py b/launch/api_client/models/create_docker_image_batch_job_resource_requests.py deleted file mode 100644 index 59421b1a..00000000 --- a/launch/api_client/models/create_docker_image_batch_job_resource_requests.py +++ /dev/null @@ -1,109 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictInt -from typing_extensions import Self - -from launch.api_client.models.cpus import Cpus -from launch.api_client.models.gpu_type import GpuType -from launch.api_client.models.memory import Memory -from launch.api_client.models.storage import Storage - - -class CreateDockerImageBatchJobResourceRequests(BaseModel): - """ - CreateDockerImageBatchJobResourceRequests - """ # noqa: E501 - - cpus: Optional[Cpus] = None - gpu_type: Optional[GpuType] = None - gpus: Optional[StrictInt] = None - memory: Optional[Memory] = None - storage: Optional[Storage] = None - __properties: ClassVar[List[str]] = ["cpus", "gpu_type", "gpus", "memory", "storage"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobResourceRequests from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of cpus - if self.cpus: - _dict["cpus"] = self.cpus.to_dict() - # override the default output from pydantic by calling `to_dict()` of memory - if self.memory: - _dict["memory"] = self.memory.to_dict() - # override the default output from pydantic by calling `to_dict()` of storage - if self.storage: - _dict["storage"] = self.storage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobResourceRequests from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "cpus": Cpus.from_dict(obj["cpus"]) if obj.get("cpus") is not None else None, - "gpu_type": obj.get("gpu_type"), - "gpus": obj.get("gpus"), - "memory": Memory.from_dict(obj["memory"]) if obj.get("memory") is not None else None, - "storage": Storage.from_dict(obj["storage"]) if obj.get("storage") is not None else None, - } - ) - return _obj diff --git a/launch/api_client/models/create_docker_image_batch_job_v1_request.py b/launch/api_client/models/create_docker_image_batch_job_v1_request.py deleted file mode 100644 index 000e7a07..00000000 --- a/launch/api_client/models/create_docker_image_batch_job_v1_request.py +++ /dev/null @@ -1,113 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing_extensions import Self - -from launch.api_client.models.create_docker_image_batch_job_resource_requests import ( - CreateDockerImageBatchJobResourceRequests, -) - - -class CreateDockerImageBatchJobV1Request(BaseModel): - """ - CreateDockerImageBatchJobV1Request - """ # noqa: E501 - - docker_image_batch_job_bundle_id: Optional[StrictStr] = None - docker_image_batch_job_bundle_name: Optional[StrictStr] = None - job_config: Optional[Dict[str, Any]] = None - labels: Dict[str, StrictStr] - override_job_max_runtime_s: Optional[StrictInt] = None - resource_requests: Optional[CreateDockerImageBatchJobResourceRequests] = None - __properties: ClassVar[List[str]] = [ - "docker_image_batch_job_bundle_id", - "docker_image_batch_job_bundle_name", - "job_config", - "labels", - "override_job_max_runtime_s", - "resource_requests", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of resource_requests - if self.resource_requests: - _dict["resource_requests"] = self.resource_requests.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "docker_image_batch_job_bundle_id": obj.get("docker_image_batch_job_bundle_id"), - "docker_image_batch_job_bundle_name": obj.get("docker_image_batch_job_bundle_name"), - "job_config": obj.get("job_config"), - "labels": obj.get("labels"), - "override_job_max_runtime_s": obj.get("override_job_max_runtime_s"), - "resource_requests": CreateDockerImageBatchJobResourceRequests.from_dict(obj["resource_requests"]) - if obj.get("resource_requests") is not None - else None, - } - ) - return _obj diff --git a/launch/api_client/models/create_docker_image_batch_job_v1_response.py b/launch/api_client/models/create_docker_image_batch_job_v1_response.py deleted file mode 100644 index 5a8a48ca..00000000 --- a/launch/api_client/models/create_docker_image_batch_job_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateDockerImageBatchJobV1Response(BaseModel): - """ - CreateDockerImageBatchJobV1Response - """ # noqa: E501 - - job_id: StrictStr - __properties: ClassVar[List[str]] = ["job_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateDockerImageBatchJobV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"job_id": obj.get("job_id")}) - return _obj diff --git a/launch/api_client/models/create_fine_tune_request.py b/launch/api_client/models/create_fine_tune_request.py deleted file mode 100644 index 13732357..00000000 --- a/launch/api_client/models/create_fine_tune_request.py +++ /dev/null @@ -1,104 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateFineTuneRequest(BaseModel): - """ - CreateFineTuneRequest - """ # noqa: E501 - - hyperparameters: Dict[str, Any] - model: StrictStr - suffix: Optional[StrictStr] = None - training_file: StrictStr - validation_file: Optional[StrictStr] = None - wandb_config: Optional[Dict[str, Any]] = None - __properties: ClassVar[List[str]] = [ - "hyperparameters", - "model", - "suffix", - "training_file", - "validation_file", - "wandb_config", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateFineTuneRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateFineTuneRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "hyperparameters": obj.get("hyperparameters"), - "model": obj.get("model"), - "suffix": obj.get("suffix"), - "training_file": obj.get("training_file"), - "validation_file": obj.get("validation_file"), - "wandb_config": obj.get("wandb_config"), - } - ) - return _obj diff --git a/launch/api_client/models/create_fine_tune_response.py b/launch/api_client/models/create_fine_tune_response.py deleted file mode 100644 index 15f3656d..00000000 --- a/launch/api_client/models/create_fine_tune_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateFineTuneResponse(BaseModel): - """ - CreateFineTuneResponse - """ # noqa: E501 - - id: StrictStr - __properties: ClassVar[List[str]] = ["id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateFineTuneResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateFineTuneResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"id": obj.get("id")}) - return _obj diff --git a/launch/api_client/models/create_llm_model_endpoint_v1_request.py b/launch/api_client/models/create_llm_model_endpoint_v1_request.py deleted file mode 100644 index adfe3843..00000000 --- a/launch/api_client/models/create_llm_model_endpoint_v1_request.py +++ /dev/null @@ -1,202 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictInt, - StrictStr, -) -from typing_extensions import Annotated, Self - -from launch.api_client.models.callback_auth import CallbackAuth -from launch.api_client.models.cpus import Cpus -from launch.api_client.models.gpu_type import GpuType -from launch.api_client.models.llm_inference_framework import ( - LLMInferenceFramework, -) -from launch.api_client.models.llm_source import LLMSource -from launch.api_client.models.memory import Memory -from launch.api_client.models.model_endpoint_type import ModelEndpointType -from launch.api_client.models.quantization import Quantization -from launch.api_client.models.storage import Storage - - -class CreateLLMModelEndpointV1Request(BaseModel): - """ - CreateLLMModelEndpointV1Request - """ # noqa: E501 - - billing_tags: Optional[Dict[str, Any]] = None - checkpoint_path: Optional[StrictStr] = None - cpus: Optional[Cpus] = None - default_callback_auth: Optional[CallbackAuth] = None - default_callback_url: Optional[Annotated[str, Field(min_length=1, strict=True, max_length=2083)]] = None - endpoint_type: Optional[ModelEndpointType] = None - gpu_type: Optional[GpuType] = None - gpus: Optional[StrictInt] = None - high_priority: Optional[StrictBool] = None - inference_framework: Optional[LLMInferenceFramework] = None - inference_framework_image_tag: Optional[StrictStr] = "latest" - labels: Dict[str, StrictStr] - max_workers: StrictInt - memory: Optional[Memory] = None - metadata: Dict[str, Any] - min_workers: StrictInt - model_name: StrictStr - name: StrictStr - num_shards: Optional[StrictInt] = 1 - optimize_costs: Optional[StrictBool] = None - per_worker: StrictInt - post_inference_hooks: Optional[List[StrictStr]] = None - prewarm: Optional[StrictBool] = None - public_inference: 
Optional[StrictBool] = True - quantize: Optional[Quantization] = None - source: Optional[LLMSource] = None - storage: Optional[Storage] = None - __properties: ClassVar[List[str]] = [ - "billing_tags", - "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "endpoint_type", - "gpu_type", - "gpus", - "high_priority", - "inference_framework", - "inference_framework_image_tag", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_name", - "name", - "num_shards", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateLLMModelEndpointV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of cpus - if self.cpus: - _dict["cpus"] = self.cpus.to_dict() - # override the default output from pydantic by calling `to_dict()` of default_callback_auth - if self.default_callback_auth: - _dict["default_callback_auth"] = self.default_callback_auth.to_dict() - # override the default output from pydantic by calling `to_dict()` of memory - if self.memory: - _dict["memory"] = self.memory.to_dict() - # override the default output from pydantic by calling `to_dict()` of storage - if self.storage: - _dict["storage"] = self.storage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateLLMModelEndpointV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "billing_tags": obj.get("billing_tags"), - "checkpoint_path": obj.get("checkpoint_path"), - "cpus": Cpus.from_dict(obj["cpus"]) if obj.get("cpus") is not None else None, - "default_callback_auth": CallbackAuth.from_dict(obj["default_callback_auth"]) - if obj.get("default_callback_auth") is not None - else None, - "default_callback_url": obj.get("default_callback_url"), - "endpoint_type": obj.get("endpoint_type"), - "gpu_type": obj.get("gpu_type"), - "gpus": obj.get("gpus"), - "high_priority": obj.get("high_priority"), - "inference_framework": obj.get("inference_framework"), - "inference_framework_image_tag": obj.get("inference_framework_image_tag") - if obj.get("inference_framework_image_tag") is not None - else "latest", - "labels": obj.get("labels"), - "max_workers": obj.get("max_workers"), - "memory": Memory.from_dict(obj["memory"]) if obj.get("memory") is not None else None, - "metadata": obj.get("metadata"), - 
"min_workers": obj.get("min_workers"), - "model_name": obj.get("model_name"), - "name": obj.get("name"), - "num_shards": obj.get("num_shards") if obj.get("num_shards") is not None else 1, - "optimize_costs": obj.get("optimize_costs"), - "per_worker": obj.get("per_worker"), - "post_inference_hooks": obj.get("post_inference_hooks"), - "prewarm": obj.get("prewarm"), - "public_inference": obj.get("public_inference") if obj.get("public_inference") is not None else True, - "quantize": obj.get("quantize"), - "source": obj.get("source"), - "storage": Storage.from_dict(obj["storage"]) if obj.get("storage") is not None else None, - } - ) - return _obj diff --git a/launch/api_client/models/create_llm_model_endpoint_v1_response.py b/launch/api_client/models/create_llm_model_endpoint_v1_response.py deleted file mode 100644 index e062862a..00000000 --- a/launch/api_client/models/create_llm_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateLLMModelEndpointV1Response(BaseModel): - """ - CreateLLMModelEndpointV1Response - """ # noqa: E501 - - endpoint_creation_task_id: StrictStr - __properties: ClassVar[List[str]] = ["endpoint_creation_task_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateLLMModelEndpointV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateLLMModelEndpointV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"endpoint_creation_task_id": obj.get("endpoint_creation_task_id")}) - return _obj diff --git a/launch/api_client/models/create_model_bundle_v1_request.py b/launch/api_client/models/create_model_bundle_v1_request.py deleted file mode 100644 index 52783c5c..00000000 --- a/launch/api_client/models/create_model_bundle_v1_request.py +++ /dev/null @@ -1,122 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.model_bundle_environment_params import ( - ModelBundleEnvironmentParams, -) -from launch.api_client.models.model_bundle_packaging_type import ( - ModelBundlePackagingType, -) - - -class CreateModelBundleV1Request(BaseModel): - """ - Request object for creating a Model Bundle. 
- """ # noqa: E501 - - app_config: Optional[Dict[str, Any]] = None - env_params: ModelBundleEnvironmentParams - location: StrictStr - metadata: Optional[Dict[str, Any]] = None - name: StrictStr - packaging_type: ModelBundlePackagingType - requirements: List[StrictStr] - schema_location: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [ - "app_config", - "env_params", - "location", - "metadata", - "name", - "packaging_type", - "requirements", - "schema_location", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateModelBundleV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of env_params - if self.env_params: - _dict["env_params"] = self.env_params.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateModelBundleV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "app_config": obj.get("app_config"), - "env_params": ModelBundleEnvironmentParams.from_dict(obj["env_params"]) - if obj.get("env_params") is not None - else None, - "location": obj.get("location"), - "metadata": obj.get("metadata"), - "name": obj.get("name"), - "packaging_type": obj.get("packaging_type"), - "requirements": obj.get("requirements"), - "schema_location": obj.get("schema_location"), - } - ) - return _obj diff --git a/launch/api_client/models/create_model_bundle_v1_response.py b/launch/api_client/models/create_model_bundle_v1_response.py deleted file mode 100644 index 7837475f..00000000 --- a/launch/api_client/models/create_model_bundle_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateModelBundleV1Response(BaseModel): - """ - Response object for creating a Model Bundle. 
- """ # noqa: E501 - - model_bundle_id: StrictStr - __properties: ClassVar[List[str]] = ["model_bundle_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateModelBundleV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateModelBundleV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"model_bundle_id": obj.get("model_bundle_id")}) - return _obj diff --git a/launch/api_client/models/create_model_bundle_v2_request.py b/launch/api_client/models/create_model_bundle_v2_request.py deleted file mode 100644 index f131a98e..00000000 --- a/launch/api_client/models/create_model_bundle_v2_request.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.flavor import Flavor - - -class CreateModelBundleV2Request(BaseModel): - """ - Request object for creating a Model Bundle. 
- """ # noqa: E501 - - flavor: Flavor - metadata: Optional[Dict[str, Any]] = None - name: StrictStr - schema_location: StrictStr - __properties: ClassVar[List[str]] = ["flavor", "metadata", "name", "schema_location"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateModelBundleV2Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of flavor - if self.flavor: - _dict["flavor"] = self.flavor.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateModelBundleV2Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "flavor": Flavor.from_dict(obj["flavor"]) if obj.get("flavor") is not None else None, - "metadata": obj.get("metadata"), - "name": obj.get("name"), - "schema_location": obj.get("schema_location"), - } - ) - return _obj diff --git a/launch/api_client/models/create_model_bundle_v2_response.py b/launch/api_client/models/create_model_bundle_v2_response.py deleted file mode 100644 index 1531c68b..00000000 --- a/launch/api_client/models/create_model_bundle_v2_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateModelBundleV2Response(BaseModel): - """ - Response object for creating a Model Bundle. 
- """ # noqa: E501 - - model_bundle_id: StrictStr - __properties: ClassVar[List[str]] = ["model_bundle_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateModelBundleV2Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateModelBundleV2Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"model_bundle_id": obj.get("model_bundle_id")}) - return _obj diff --git a/launch/api_client/models/create_model_endpoint_v1_request.py b/launch/api_client/models/create_model_endpoint_v1_request.py deleted file mode 100644 index af5617cc..00000000 --- a/launch/api_client/models/create_model_endpoint_v1_request.py +++ /dev/null @@ -1,177 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictInt, - StrictStr, -) -from typing_extensions import Annotated, Self - -from launch.api_client.models.callback_auth import CallbackAuth -from launch.api_client.models.cpus import Cpus -from launch.api_client.models.gpu_type import GpuType -from launch.api_client.models.memory import Memory -from launch.api_client.models.model_endpoint_type import ModelEndpointType -from launch.api_client.models.storage import Storage - - -class CreateModelEndpointV1Request(BaseModel): - """ - CreateModelEndpointV1Request - """ # noqa: E501 - - billing_tags: Optional[Dict[str, Any]] = None - cpus: Cpus - default_callback_auth: Optional[CallbackAuth] = None - default_callback_url: Optional[Annotated[str, Field(min_length=1, strict=True, max_length=2083)]] = None - endpoint_type: ModelEndpointType - gpu_type: Optional[GpuType] = None - gpus: Annotated[int, Field(strict=True, ge=0)] - high_priority: Optional[StrictBool] = None - labels: Dict[str, StrictStr] - max_workers: Annotated[int, Field(strict=True, ge=0)] - memory: Memory - metadata: Dict[str, Any] - min_workers: Annotated[int, Field(strict=True, ge=0)] - model_bundle_id: StrictStr - name: Annotated[str, Field(strict=True, max_length=63)] - optimize_costs: Optional[StrictBool] = None - per_worker: StrictInt - post_inference_hooks: Optional[List[StrictStr]] = None - prewarm: Optional[StrictBool] = None - public_inference: Optional[StrictBool] = False - storage: Optional[Storage] = None - __properties: ClassVar[List[str]] = [ - "billing_tags", - "cpus", - "default_callback_auth", - "default_callback_url", - "endpoint_type", - "gpu_type", - "gpus", - "high_priority", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "name", - 
"optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateModelEndpointV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of cpus - if self.cpus: - _dict["cpus"] = self.cpus.to_dict() - # override the default output from pydantic by calling `to_dict()` of default_callback_auth - if self.default_callback_auth: - _dict["default_callback_auth"] = self.default_callback_auth.to_dict() - # override the default output from pydantic by calling `to_dict()` of memory - if self.memory: - _dict["memory"] = self.memory.to_dict() - # override the default output from pydantic by calling `to_dict()` of storage - if self.storage: - _dict["storage"] = self.storage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateModelEndpointV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "billing_tags": obj.get("billing_tags"), - "cpus": Cpus.from_dict(obj["cpus"]) if obj.get("cpus") is not None else None, - "default_callback_auth": CallbackAuth.from_dict(obj["default_callback_auth"]) - if obj.get("default_callback_auth") is not None - else None, - "default_callback_url": obj.get("default_callback_url"), - "endpoint_type": obj.get("endpoint_type"), - "gpu_type": obj.get("gpu_type"), - "gpus": obj.get("gpus"), - "high_priority": obj.get("high_priority"), - "labels": obj.get("labels"), - "max_workers": obj.get("max_workers"), - "memory": Memory.from_dict(obj["memory"]) if obj.get("memory") is not None else None, - "metadata": obj.get("metadata"), - "min_workers": obj.get("min_workers"), - "model_bundle_id": obj.get("model_bundle_id"), - "name": obj.get("name"), - "optimize_costs": obj.get("optimize_costs"), - "per_worker": obj.get("per_worker"), - "post_inference_hooks": obj.get("post_inference_hooks"), - 
"prewarm": obj.get("prewarm"), - "public_inference": obj.get("public_inference") if obj.get("public_inference") is not None else False, - "storage": Storage.from_dict(obj["storage"]) if obj.get("storage") is not None else None, - } - ) - return _obj diff --git a/launch/api_client/models/create_model_endpoint_v1_response.py b/launch/api_client/models/create_model_endpoint_v1_response.py deleted file mode 100644 index 375ddc06..00000000 --- a/launch/api_client/models/create_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateModelEndpointV1Response(BaseModel): - """ - CreateModelEndpointV1Response - """ # noqa: E501 - - endpoint_creation_task_id: StrictStr - __properties: ClassVar[List[str]] = ["endpoint_creation_task_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateModelEndpointV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - 
- def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateModelEndpointV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"endpoint_creation_task_id": obj.get("endpoint_creation_task_id")}) - return _obj diff --git a/launch/api_client/models/create_trigger_v1_request.py b/launch/api_client/models/create_trigger_v1_request.py deleted file mode 100644 index 6785ebfd..00000000 --- a/launch/api_client/models/create_trigger_v1_request.py +++ /dev/null @@ -1,101 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateTriggerV1Request(BaseModel): - """ - CreateTriggerV1Request - """ # noqa: E501 - - bundle_id: StrictStr - cron_schedule: StrictStr - default_job_config: Optional[Dict[str, Any]] = None - default_job_metadata: Optional[Dict[str, StrictStr]] = None - name: StrictStr - __properties: ClassVar[List[str]] = [ - "bundle_id", - "cron_schedule", - "default_job_config", - "default_job_metadata", - "name", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateTriggerV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateTriggerV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "bundle_id": obj.get("bundle_id"), - "cron_schedule": obj.get("cron_schedule"), - "default_job_config": obj.get("default_job_config"), - "default_job_metadata": obj.get("default_job_metadata"), - "name": obj.get("name"), - } - ) - return _obj diff --git a/launch/api_client/models/create_trigger_v1_response.py b/launch/api_client/models/create_trigger_v1_response.py deleted file mode 100644 index 51c7e8c6..00000000 --- a/launch/api_client/models/create_trigger_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class CreateTriggerV1Response(BaseModel): - """ - CreateTriggerV1Response - """ # noqa: E501 - - trigger_id: StrictStr - __properties: ClassVar[List[str]] = ["trigger_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateTriggerV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateTriggerV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"trigger_id": obj.get("trigger_id")}) - return _obj diff --git a/launch/api_client/models/custom_framework.py b/launch/api_client/models/custom_framework.py deleted file mode 100644 index 82ec1505..00000000 --- a/launch/api_client/models/custom_framework.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing_extensions import Self - - -class CustomFramework(BaseModel): - """ - This is the entity-layer class for a custom framework specification. 
- """ # noqa: E501 - - framework_type: StrictStr - image_repository: StrictStr - image_tag: StrictStr - __properties: ClassVar[List[str]] = ["framework_type", "image_repository", "image_tag"] - - @field_validator("framework_type") - def framework_type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["custom_base_image"]): - raise ValueError("must be one of enum values ('custom_base_image')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CustomFramework from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CustomFramework from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "framework_type": obj.get("framework_type"), - "image_repository": obj.get("image_repository"), - "image_tag": obj.get("image_tag"), - } - ) - return _obj diff --git a/launch/api_client/models/delete_file_response.py b/launch/api_client/models/delete_file_response.py deleted file mode 100644 index 2fad69d4..00000000 --- a/launch/api_client/models/delete_file_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictBool -from typing_extensions import Self - - -class DeleteFileResponse(BaseModel): - """ - Response object for deleting a file. 
- """ # noqa: E501 - - deleted: StrictBool = Field(description="Whether deletion was successful.") - __properties: ClassVar[List[str]] = ["deleted"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of DeleteFileResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of DeleteFileResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"deleted": obj.get("deleted")}) - return _obj diff --git a/launch/api_client/models/delete_llm_endpoint_response.py b/launch/api_client/models/delete_llm_endpoint_response.py deleted file mode 100644 index 646e1d50..00000000 --- a/launch/api_client/models/delete_llm_endpoint_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool -from typing_extensions import Self - - -class DeleteLLMEndpointResponse(BaseModel): - """ - DeleteLLMEndpointResponse - """ # noqa: E501 - - deleted: StrictBool - __properties: ClassVar[List[str]] = ["deleted"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of DeleteLLMEndpointResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of DeleteLLMEndpointResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"deleted": obj.get("deleted")}) - return _obj diff --git a/launch/api_client/models/delete_model_endpoint_v1_response.py b/launch/api_client/models/delete_model_endpoint_v1_response.py deleted file mode 100644 index 9f4d96c6..00000000 --- a/launch/api_client/models/delete_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool -from typing_extensions import Self - - -class DeleteModelEndpointV1Response(BaseModel): - """ - DeleteModelEndpointV1Response - """ # noqa: E501 - - deleted: StrictBool - __properties: ClassVar[List[str]] = ["deleted"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of DeleteModelEndpointV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of DeleteModelEndpointV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"deleted": obj.get("deleted")}) - return _obj diff --git a/launch/api_client/models/delete_trigger_v1_response.py b/launch/api_client/models/delete_trigger_v1_response.py deleted file mode 100644 index 40b5f2ab..00000000 --- a/launch/api_client/models/delete_trigger_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool -from typing_extensions import Self - - -class DeleteTriggerV1Response(BaseModel): - """ - DeleteTriggerV1Response - """ # noqa: E501 - - success: StrictBool - __properties: ClassVar[List[str]] = ["success"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of DeleteTriggerV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of DeleteTriggerV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"success": obj.get("success")}) - return _obj diff --git a/launch/api_client/models/docker_image_batch_job.py b/launch/api_client/models/docker_image_batch_job.py deleted file mode 100644 index 591b20f3..00000000 --- a/launch/api_client/models/docker_image_batch_job.py +++ /dev/null @@ -1,116 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from datetime import datetime -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing_extensions import Self - -from launch.api_client.models.batch_job_status import BatchJobStatus - - -class DockerImageBatchJob(BaseModel): - """ - This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job created via the \"supply a docker image for a k8s job\" API. 
- """ # noqa: E501 - - annotations: Optional[Dict[str, StrictStr]] = None - completed_at: Optional[datetime] = None - created_at: datetime - created_by: StrictStr - id: StrictStr - num_workers: Optional[StrictInt] = 1 - override_job_max_runtime_s: Optional[StrictInt] = None - owner: StrictStr - status: BatchJobStatus - __properties: ClassVar[List[str]] = [ - "annotations", - "completed_at", - "created_at", - "created_by", - "id", - "num_workers", - "override_job_max_runtime_s", - "owner", - "status", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of DockerImageBatchJob from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of DockerImageBatchJob from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "annotations": obj.get("annotations"), - "completed_at": obj.get("completed_at"), - "created_at": obj.get("created_at"), - "created_by": obj.get("created_by"), - "id": obj.get("id"), - "num_workers": obj.get("num_workers") if obj.get("num_workers") is not None else 1, - "override_job_max_runtime_s": obj.get("override_job_max_runtime_s"), - "owner": obj.get("owner"), - "status": obj.get("status"), - } - ) - return _obj diff --git a/launch/api_client/models/docker_image_batch_job_bundle_v1_response.py b/launch/api_client/models/docker_image_batch_job_bundle_v1_response.py deleted file mode 100644 index 56790f11..00000000 --- a/launch/api_client/models/docker_image_batch_job_bundle_v1_response.py +++ /dev/null @@ -1,129 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from datetime import datetime -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool, StrictInt, StrictStr -from typing_extensions import Self - - -class DockerImageBatchJobBundleV1Response(BaseModel): - """ - DockerImageBatchJobBundleV1Response - """ # noqa: E501 - - command: List[StrictStr] - cpus: Optional[StrictStr] = None - created_at: datetime - env: Dict[str, StrictStr] - gpu_type: Optional[StrictStr] = None - gpus: Optional[StrictInt] = None - id: StrictStr - image_repository: StrictStr - image_tag: StrictStr - memory: Optional[StrictStr] = None - mount_location: Optional[StrictStr] = None - name: StrictStr - public: Optional[StrictBool] = None - storage: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [ - "command", - "cpus", - "created_at", - "env", - "gpu_type", - "gpus", - "id", - "image_repository", - "image_tag", - "memory", - "mount_location", - "name", - "public", - "storage", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of DockerImageBatchJobBundleV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of DockerImageBatchJobBundleV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "command": obj.get("command"), - "cpus": obj.get("cpus"), - "created_at": obj.get("created_at"), - "env": obj.get("env"), - "gpu_type": obj.get("gpu_type"), - "gpus": obj.get("gpus"), - "id": obj.get("id"), - "image_repository": obj.get("image_repository"), - "image_tag": obj.get("image_tag"), - "memory": obj.get("memory"), - "mount_location": obj.get("mount_location"), - "name": obj.get("name"), - "public": obj.get("public"), - "storage": obj.get("storage"), - } - ) - return _obj diff --git a/launch/api_client/models/endpoint_predict_v1_request.py b/launch/api_client/models/endpoint_predict_v1_request.py deleted file mode 100644 index f4237979..00000000 --- a/launch/api_client/models/endpoint_predict_v1_request.py +++ /dev/null @@ -1,116 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr -from typing_extensions import Self - -from launch.api_client.models.callback_auth import CallbackAuth - - -class EndpointPredictV1Request(BaseModel): - """ - EndpointPredictV1Request - """ # noqa: E501 - - args: Optional[Any] = None - callback_auth: Optional[CallbackAuth] = None - callback_url: Optional[StrictStr] = None - cloudpickle: Optional[StrictStr] = None - return_pickled: Optional[StrictBool] = False - url: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "return_pickled", - "url", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of EndpointPredictV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of callback_auth - if self.callback_auth: - _dict["callback_auth"] = self.callback_auth.to_dict() - # set to None if args (nullable) is None - # and model_fields_set contains the field - if self.args is None and "args" in self.model_fields_set: - _dict["args"] = None - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of EndpointPredictV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "args": obj.get("args"), - "callback_auth": CallbackAuth.from_dict(obj["callback_auth"]) - if obj.get("callback_auth") is not None - else None, - "callback_url": obj.get("callback_url"), - "cloudpickle": obj.get("cloudpickle"), - "return_pickled": obj.get("return_pickled") if obj.get("return_pickled") is not None else False, - "url": obj.get("url"), - } - ) - return _obj diff --git a/launch/api_client/models/flavor.py b/launch/api_client/models/flavor.py deleted file mode 100644 index f2adc61d..00000000 --- a/launch/api_client/models/flavor.py +++ /dev/null @@ -1,237 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -from typing import Any, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing_extensions import Literal, Self - -from launch.api_client.models.cloudpickle_artifact_flavor import ( - CloudpickleArtifactFlavor, -) -from launch.api_client.models.runnable_image_flavor import RunnableImageFlavor -from launch.api_client.models.streaming_enhanced_runnable_image_flavor import ( - StreamingEnhancedRunnableImageFlavor, -) -from launch.api_client.models.triton_enhanced_runnable_image_flavor import ( - TritonEnhancedRunnableImageFlavor, -) -from launch.api_client.models.zip_artifact_flavor import ZipArtifactFlavor - -FLAVOR_ONE_OF_SCHEMAS = [ - "CloudpickleArtifactFlavor", - "RunnableImageFlavor", - "StreamingEnhancedRunnableImageFlavor", - "TritonEnhancedRunnableImageFlavor", - "ZipArtifactFlavor", -] - - -class Flavor(BaseModel): - """ - Flavor - """ - - # data type: CloudpickleArtifactFlavor - oneof_schema_1_validator: Optional[CloudpickleArtifactFlavor] = None - # data type: ZipArtifactFlavor - oneof_schema_2_validator: Optional[ZipArtifactFlavor] = None - # data type: RunnableImageFlavor - oneof_schema_3_validator: Optional[RunnableImageFlavor] = None - # data type: StreamingEnhancedRunnableImageFlavor - oneof_schema_4_validator: Optional[StreamingEnhancedRunnableImageFlavor] = None - # data type: TritonEnhancedRunnableImageFlavor - oneof_schema_5_validator: Optional[TritonEnhancedRunnableImageFlavor] = None - actual_instance: Optional[ - Union[ - CloudpickleArtifactFlavor, - RunnableImageFlavor, - StreamingEnhancedRunnableImageFlavor, - TritonEnhancedRunnableImageFlavor, - ZipArtifactFlavor, - ] - ] = None - one_of_schemas: Set[str] = { - "CloudpickleArtifactFlavor", - "RunnableImageFlavor", - "StreamingEnhancedRunnableImageFlavor", - "TritonEnhancedRunnableImageFlavor", - 
"ZipArtifactFlavor", - } - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - discriminator_value_class_map: Dict[str, str] = {} - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`") - if kwargs: - raise ValueError("If a position argument is used, keyword arguments cannot be used.") - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = Flavor.model_construct() - error_messages = [] - match = 0 - # validate data type: CloudpickleArtifactFlavor - if not isinstance(v, CloudpickleArtifactFlavor): - error_messages.append(f"Error! Input type `{type(v)}` is not `CloudpickleArtifactFlavor`") - else: - match += 1 - # validate data type: ZipArtifactFlavor - if not isinstance(v, ZipArtifactFlavor): - error_messages.append(f"Error! Input type `{type(v)}` is not `ZipArtifactFlavor`") - else: - match += 1 - # validate data type: RunnableImageFlavor - if not isinstance(v, RunnableImageFlavor): - error_messages.append(f"Error! Input type `{type(v)}` is not `RunnableImageFlavor`") - else: - match += 1 - # validate data type: StreamingEnhancedRunnableImageFlavor - if not isinstance(v, StreamingEnhancedRunnableImageFlavor): - error_messages.append(f"Error! Input type `{type(v)}` is not `StreamingEnhancedRunnableImageFlavor`") - else: - match += 1 - # validate data type: TritonEnhancedRunnableImageFlavor - if not isinstance(v, TritonEnhancedRunnableImageFlavor): - error_messages.append(f"Error! 
Input type `{type(v)}` is not `TritonEnhancedRunnableImageFlavor`") - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in Flavor with oneOf schemas: CloudpickleArtifactFlavor, RunnableImageFlavor, StreamingEnhancedRunnableImageFlavor, TritonEnhancedRunnableImageFlavor, ZipArtifactFlavor. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in Flavor with oneOf schemas: CloudpickleArtifactFlavor, RunnableImageFlavor, StreamingEnhancedRunnableImageFlavor, TritonEnhancedRunnableImageFlavor, ZipArtifactFlavor. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into CloudpickleArtifactFlavor - try: - instance.actual_instance = CloudpickleArtifactFlavor.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into ZipArtifactFlavor - try: - instance.actual_instance = ZipArtifactFlavor.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into RunnableImageFlavor - try: - instance.actual_instance = RunnableImageFlavor.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into StreamingEnhancedRunnableImageFlavor - try: - instance.actual_instance = StreamingEnhancedRunnableImageFlavor.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into 
TritonEnhancedRunnableImageFlavor - try: - instance.actual_instance = TritonEnhancedRunnableImageFlavor.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into Flavor with oneOf schemas: CloudpickleArtifactFlavor, RunnableImageFlavor, StreamingEnhancedRunnableImageFlavor, TritonEnhancedRunnableImageFlavor, ZipArtifactFlavor. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into Flavor with oneOf schemas: CloudpickleArtifactFlavor, RunnableImageFlavor, StreamingEnhancedRunnableImageFlavor, TritonEnhancedRunnableImageFlavor, ZipArtifactFlavor. Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict( - self, - ) -> Optional[ - Union[ - Dict[str, Any], - CloudpickleArtifactFlavor, - RunnableImageFlavor, - StreamingEnhancedRunnableImageFlavor, - TritonEnhancedRunnableImageFlavor, - ZipArtifactFlavor, - ] - ]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/launch/api_client/models/framework.py b/launch/api_client/models/framework.py deleted file 
mode 100644 index c9cb7904..00000000 --- a/launch/api_client/models/framework.py +++ /dev/null @@ -1,172 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -from typing import Any, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing_extensions import Literal, Self - -from launch.api_client.models.custom_framework import CustomFramework -from launch.api_client.models.pytorch_framework import PytorchFramework -from launch.api_client.models.tensorflow_framework import TensorflowFramework - -FRAMEWORK_ONE_OF_SCHEMAS = ["CustomFramework", "PytorchFramework", "TensorflowFramework"] - - -class Framework(BaseModel): - """ - Framework - """ - - # data type: PytorchFramework - oneof_schema_1_validator: Optional[PytorchFramework] = None - # data type: TensorflowFramework - oneof_schema_2_validator: Optional[TensorflowFramework] = None - # data type: CustomFramework - oneof_schema_3_validator: Optional[CustomFramework] = None - actual_instance: Optional[Union[CustomFramework, PytorchFramework, TensorflowFramework]] = None - one_of_schemas: Set[str] = {"CustomFramework", "PytorchFramework", "TensorflowFramework"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - discriminator_value_class_map: Dict[str, str] = {} - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`") - if kwargs: - raise ValueError("If a position argument is used, keyword arguments cannot be used.") - 
super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = Framework.model_construct() - error_messages = [] - match = 0 - # validate data type: PytorchFramework - if not isinstance(v, PytorchFramework): - error_messages.append(f"Error! Input type `{type(v)}` is not `PytorchFramework`") - else: - match += 1 - # validate data type: TensorflowFramework - if not isinstance(v, TensorflowFramework): - error_messages.append(f"Error! Input type `{type(v)}` is not `TensorflowFramework`") - else: - match += 1 - # validate data type: CustomFramework - if not isinstance(v, CustomFramework): - error_messages.append(f"Error! Input type `{type(v)}` is not `CustomFramework`") - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in Framework with oneOf schemas: CustomFramework, PytorchFramework, TensorflowFramework. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in Framework with oneOf schemas: CustomFramework, PytorchFramework, TensorflowFramework. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into PytorchFramework - try: - instance.actual_instance = PytorchFramework.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into TensorflowFramework - try: - instance.actual_instance = TensorflowFramework.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into CustomFramework - try: - instance.actual_instance = CustomFramework.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into Framework with oneOf schemas: CustomFramework, PytorchFramework, TensorflowFramework. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into Framework with oneOf schemas: CustomFramework, PytorchFramework, TensorflowFramework. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], CustomFramework, PytorchFramework, TensorflowFramework]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/launch/api_client/models/get_async_task_v1_response.py b/launch/api_client/models/get_async_task_v1_response.py deleted file mode 100644 index 045b5055..00000000 --- a/launch/api_client/models/get_async_task_v1_response.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.task_status import TaskStatus - - -class GetAsyncTaskV1Response(BaseModel): - """ - GetAsyncTaskV1Response - """ # noqa: E501 - - result: Optional[Any] = None - status: TaskStatus - task_id: StrictStr - traceback: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["result", "status", "task_id", "traceback"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GetAsyncTaskV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # set to None if result (nullable) is None - # and model_fields_set contains the field - if self.result is None and "result" in self.model_fields_set: - _dict["result"] = None - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetAsyncTaskV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "result": obj.get("result"), - "status": obj.get("status"), - "task_id": obj.get("task_id"), - "traceback": obj.get("traceback"), - } - ) - return _obj diff --git a/launch/api_client/models/get_batch_job_v1_response.py b/launch/api_client/models/get_batch_job_v1_response.py deleted file mode 100644 index 06ea42b1..00000000 --- a/launch/api_client/models/get_batch_job_v1_response.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr -from typing_extensions import Self - -from launch.api_client.models.batch_job_status import BatchJobStatus - - -class GetBatchJobV1Response(BaseModel): - """ - GetBatchJobV1Response - """ # noqa: E501 - - duration: Union[StrictFloat, StrictInt] - num_tasks_completed: Optional[StrictInt] = None - num_tasks_pending: Optional[StrictInt] = None - result: Optional[StrictStr] = None - status: BatchJobStatus - __properties: ClassVar[List[str]] = ["duration", "num_tasks_completed", "num_tasks_pending", "result", "status"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GetBatchJobV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetBatchJobV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "duration": obj.get("duration"), - "num_tasks_completed": obj.get("num_tasks_completed"), - "num_tasks_pending": obj.get("num_tasks_pending"), - "result": obj.get("result"), - "status": obj.get("status"), - } - ) - return _obj diff --git a/launch/api_client/models/get_docker_image_batch_job_v1_response.py b/launch/api_client/models/get_docker_image_batch_job_v1_response.py deleted file mode 100644 index 6f4eba38..00000000 --- a/launch/api_client/models/get_docker_image_batch_job_v1_response.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.batch_job_status import BatchJobStatus - - -class GetDockerImageBatchJobV1Response(BaseModel): - """ - GetDockerImageBatchJobV1Response - """ # noqa: E501 - - status: BatchJobStatus - __properties: ClassVar[List[str]] = ["status"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GetDockerImageBatchJobV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetDockerImageBatchJobV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"status": obj.get("status")}) - return _obj diff --git a/launch/api_client/models/get_file_content_response.py b/launch/api_client/models/get_file_content_response.py deleted file mode 100644 index 7616386b..00000000 --- a/launch/api_client/models/get_file_content_response.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing_extensions import Self - - -class GetFileContentResponse(BaseModel): - """ - Response object for retrieving a file's content. 
- """ # noqa: E501 - - content: StrictStr = Field(description="File content.") - id: StrictStr = Field(description="ID of the requested file.") - __properties: ClassVar[List[str]] = ["content", "id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GetFileContentResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetFileContentResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"content": obj.get("content"), "id": obj.get("id")}) - return _obj diff --git a/launch/api_client/models/get_file_response.py b/launch/api_client/models/get_file_response.py deleted file mode 100644 index b868113a..00000000 --- a/launch/api_client/models/get_file_response.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr -from typing_extensions import Self - - -class GetFileResponse(BaseModel): - """ - Response object for retrieving a file. 
- """ # noqa: E501 - - filename: StrictStr = Field(description="File name.") - id: StrictStr = Field(description="ID of the requested file.") - size: StrictInt = Field(description="Length of the file, in characters.") - __properties: ClassVar[List[str]] = ["filename", "id", "size"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GetFileResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetFileResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"filename": obj.get("filename"), "id": obj.get("id"), "size": obj.get("size")}) - return _obj diff --git a/launch/api_client/models/get_fine_tune_events_response.py b/launch/api_client/models/get_fine_tune_events_response.py deleted file mode 100644 index 5898ee4c..00000000 --- a/launch/api_client/models/get_fine_tune_events_response.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.llm_fine_tune_event import LLMFineTuneEvent - - -class GetFineTuneEventsResponse(BaseModel): - """ - GetFineTuneEventsResponse - """ # noqa: E501 - - events: List[LLMFineTuneEvent] - __properties: ClassVar[List[str]] = ["events"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GetFineTuneEventsResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in events (list) - _items = [] - if self.events: - for _item in self.events: - if _item: - _items.append(_item.to_dict()) - _dict["events"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetFineTuneEventsResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "events": [LLMFineTuneEvent.from_dict(_item) for _item in obj["events"]] - if obj.get("events") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/get_fine_tune_response.py b/launch/api_client/models/get_fine_tune_response.py deleted file mode 100644 index 3584adda..00000000 --- a/launch/api_client/models/get_fine_tune_response.py +++ /dev/null @@ -1,92 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing_extensions import Self - -from launch.api_client.models.batch_job_status import BatchJobStatus - - -class GetFineTuneResponse(BaseModel): - """ - GetFineTuneResponse - """ # noqa: E501 - - fine_tuned_model: Optional[StrictStr] = Field( - default=None, - description="Name of the resulting fine-tuned model. 
This can be plugged into the Completion API ones the fine-tune is complete", - ) - id: StrictStr = Field(description="Unique ID of the fine tune") - status: BatchJobStatus = Field(description="Status of the requested fine tune.") - __properties: ClassVar[List[str]] = ["fine_tuned_model", "id", "status"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GetFineTuneResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetFineTuneResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"fine_tuned_model": obj.get("fine_tuned_model"), "id": obj.get("id"), "status": obj.get("status")} - ) - return _obj diff --git a/launch/api_client/models/get_llm_model_endpoint_v1_response.py b/launch/api_client/models/get_llm_model_endpoint_v1_response.py deleted file mode 100644 index 8d132a69..00000000 --- a/launch/api_client/models/get_llm_model_endpoint_v1_response.py +++ /dev/null @@ -1,132 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing_extensions import Self - -from launch.api_client.models.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) -from launch.api_client.models.llm_inference_framework import ( - LLMInferenceFramework, -) -from launch.api_client.models.llm_source import LLMSource -from launch.api_client.models.model_endpoint_status import ModelEndpointStatus -from launch.api_client.models.quantization import Quantization - - -class GetLLMModelEndpointV1Response(BaseModel): - """ - GetLLMModelEndpointV1Response - """ # noqa: E501 - - checkpoint_path: Optional[StrictStr] = None - id: StrictStr - inference_framework: LLMInferenceFramework - inference_framework_image_tag: Optional[StrictStr] = None - model_name: StrictStr - name: StrictStr - num_shards: Optional[StrictInt] = None - quantize: Optional[Quantization] = None - source: LLMSource - spec: Optional[GetModelEndpointV1Response] = None - status: ModelEndpointStatus - __properties: ClassVar[List[str]] = [ - "checkpoint_path", - "id", - "inference_framework", - "inference_framework_image_tag", - "model_name", - "name", - "num_shards", - "quantize", - "source", - "spec", - "status", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of 
GetLLMModelEndpointV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of spec - if self.spec: - _dict["spec"] = self.spec.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetLLMModelEndpointV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "checkpoint_path": obj.get("checkpoint_path"), - "id": obj.get("id"), - "inference_framework": obj.get("inference_framework"), - "inference_framework_image_tag": obj.get("inference_framework_image_tag"), - "model_name": obj.get("model_name"), - "name": obj.get("name"), - "num_shards": obj.get("num_shards"), - "quantize": obj.get("quantize"), - "source": obj.get("source"), - "spec": GetModelEndpointV1Response.from_dict(obj["spec"]) if obj.get("spec") is not None else None, - "status": obj.get("status"), - } - ) - return _obj diff --git a/launch/api_client/models/get_model_endpoint_v1_response.py b/launch/api_client/models/get_model_endpoint_v1_response.py deleted file mode 100644 index fbe07eb0..00000000 --- a/launch/api_client/models/get_model_endpoint_v1_response.py +++ /dev/null @@ -1,182 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - 
The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from datetime import datetime -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictInt, - StrictStr, -) -from typing_extensions import Annotated, Self - -from launch.api_client.models.callback_auth import CallbackAuth -from launch.api_client.models.model_endpoint_deployment_state import ( - ModelEndpointDeploymentState, -) -from launch.api_client.models.model_endpoint_resource_state import ( - ModelEndpointResourceState, -) -from launch.api_client.models.model_endpoint_status import ModelEndpointStatus -from launch.api_client.models.model_endpoint_type import ModelEndpointType - - -class GetModelEndpointV1Response(BaseModel): - """ - GetModelEndpointV1Response - """ # noqa: E501 - - aws_role: Optional[StrictStr] = None - bundle_name: StrictStr - created_at: datetime - created_by: StrictStr - default_callback_auth: Optional[CallbackAuth] = None - default_callback_url: Optional[Annotated[str, Field(min_length=1, strict=True, max_length=2083)]] = None - deployment_name: Optional[StrictStr] = None - deployment_state: Optional[ModelEndpointDeploymentState] = None - destination: StrictStr - endpoint_type: ModelEndpointType - id: StrictStr - labels: Optional[Dict[str, StrictStr]] = None - last_updated_at: datetime - metadata: Optional[Dict[str, Any]] = None - name: StrictStr - num_queued_items: Optional[StrictInt] = None - post_inference_hooks: Optional[List[StrictStr]] = None - public_inference: Optional[StrictBool] = None - resource_state: Optional[ModelEndpointResourceState] = None - results_s3_bucket: Optional[StrictStr] = None - status: ModelEndpointStatus - __properties: ClassVar[List[str]] = [ - "aws_role", - 
"bundle_name", - "created_at", - "created_by", - "default_callback_auth", - "default_callback_url", - "deployment_name", - "deployment_state", - "destination", - "endpoint_type", - "id", - "labels", - "last_updated_at", - "metadata", - "name", - "num_queued_items", - "post_inference_hooks", - "public_inference", - "resource_state", - "results_s3_bucket", - "status", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GetModelEndpointV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of default_callback_auth - if self.default_callback_auth: - _dict["default_callback_auth"] = self.default_callback_auth.to_dict() - # override the default output from pydantic by calling `to_dict()` of deployment_state - if self.deployment_state: - _dict["deployment_state"] = self.deployment_state.to_dict() - # override the default output from pydantic by calling `to_dict()` of resource_state - if self.resource_state: - _dict["resource_state"] = self.resource_state.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetModelEndpointV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "aws_role": obj.get("aws_role"), - "bundle_name": obj.get("bundle_name"), - "created_at": obj.get("created_at"), - "created_by": obj.get("created_by"), - "default_callback_auth": CallbackAuth.from_dict(obj["default_callback_auth"]) - if obj.get("default_callback_auth") is not None - else None, - "default_callback_url": obj.get("default_callback_url"), - "deployment_name": obj.get("deployment_name"), - "deployment_state": ModelEndpointDeploymentState.from_dict(obj["deployment_state"]) - if obj.get("deployment_state") is not None - else None, - "destination": obj.get("destination"), - "endpoint_type": obj.get("endpoint_type"), - "id": obj.get("id"), - "labels": obj.get("labels"), - "last_updated_at": obj.get("last_updated_at"), - "metadata": obj.get("metadata"), - "name": obj.get("name"), - "num_queued_items": obj.get("num_queued_items"), - "post_inference_hooks": obj.get("post_inference_hooks"), - "public_inference": obj.get("public_inference"), - "resource_state": 
ModelEndpointResourceState.from_dict(obj["resource_state"]) - if obj.get("resource_state") is not None - else None, - "results_s3_bucket": obj.get("results_s3_bucket"), - "status": obj.get("status"), - } - ) - return _obj diff --git a/launch/api_client/models/get_trigger_v1_response.py b/launch/api_client/models/get_trigger_v1_response.py deleted file mode 100644 index 58c0f35a..00000000 --- a/launch/api_client/models/get_trigger_v1_response.py +++ /dev/null @@ -1,114 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from datetime import datetime -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class GetTriggerV1Response(BaseModel): - """ - GetTriggerV1Response - """ # noqa: E501 - - created_at: datetime - created_by: StrictStr - cron_schedule: StrictStr - default_job_config: Optional[Dict[str, Any]] = None - default_job_metadata: Optional[Dict[str, StrictStr]] = None - docker_image_batch_job_bundle_id: StrictStr - id: StrictStr - name: StrictStr - owner: StrictStr - __properties: ClassVar[List[str]] = [ - "created_at", - "created_by", - "cron_schedule", - "default_job_config", - "default_job_metadata", - "docker_image_batch_job_bundle_id", - "id", - "name", - "owner", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation 
of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GetTriggerV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GetTriggerV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "created_at": obj.get("created_at"), - "created_by": obj.get("created_by"), - "cron_schedule": obj.get("cron_schedule"), - "default_job_config": obj.get("default_job_config"), - "default_job_metadata": obj.get("default_job_metadata"), - "docker_image_batch_job_bundle_id": obj.get("docker_image_batch_job_bundle_id"), - "id": obj.get("id"), - "name": obj.get("name"), - "owner": obj.get("owner"), - } - ) - return _obj diff --git a/launch/api_client/models/gpu_type.py b/launch/api_client/models/gpu_type.py deleted file mode 100644 index 8d247e0a..00000000 --- a/launch/api_client/models/gpu_type.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator 
(https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class GpuType(str, Enum): - """ - Lists allowed GPU types for Launch. - """ - - """ - allowed enum values - """ - NVIDIA_MINUS_TESLA_MINUS_T4 = "nvidia-tesla-t4" - NVIDIA_MINUS_AMPERE_MINUS_A10 = "nvidia-ampere-a10" - NVIDIA_MINUS_AMPERE_MINUS_A100 = "nvidia-ampere-a100" - NVIDIA_MINUS_AMPERE_MINUS_A100E = "nvidia-ampere-a100e" - NVIDIA_MINUS_HOPPER_MINUS_H100 = "nvidia-hopper-h100" - NVIDIA_MINUS_HOPPER_MINUS_H100_MINUS_1G20GB = "nvidia-hopper-h100-1g20gb" - NVIDIA_MINUS_HOPPER_MINUS_H100_MINUS_3G40GB = "nvidia-hopper-h100-3g40gb" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of GpuType from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/http_validation_error.py b/launch/api_client/models/http_validation_error.py deleted file mode 100644 index cb4ca8c7..00000000 --- a/launch/api_client/models/http_validation_error.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.validation_error import ValidationError - - -class HTTPValidationError(BaseModel): - """ - HTTPValidationError - """ # noqa: E501 - - detail: Optional[List[ValidationError]] = None - __properties: ClassVar[List[str]] = ["detail"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of HTTPValidationError from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in detail (list) - _items = [] - if self.detail: - for _item in self.detail: - if _item: - _items.append(_item.to_dict()) - _dict["detail"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of HTTPValidationError from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "detail": [ValidationError.from_dict(_item) for _item in obj["detail"]] - if obj.get("detail") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/hyperparameters.py b/launch/api_client/models/hyperparameters.py deleted file mode 100644 index e4c6a5fa..00000000 --- a/launch/api_client/models/hyperparameters.py +++ /dev/null @@ -1,186 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from inspect import getfullargspec -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictFloat, - StrictInt, - StrictStr, - ValidationError, - field_validator, -) -from typing_extensions import Literal, Self - -HYPERPARAMETERS_ANY_OF_SCHEMAS = ["float", "int", "object", "str"] - - -class Hyperparameters(BaseModel): - """ - Hyperparameters - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: int - anyof_schema_2_validator: Optional[StrictInt] = None - # data type: float - anyof_schema_3_validator: Optional[Union[StrictFloat, StrictInt]] = None - # data type: object - anyof_schema_4_validator: Optional[Dict[str, Any]] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[float, int, object, str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"float", "int", "object", "str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`") - if kwargs: - raise ValueError("If a position argument is used, keyword arguments cannot be used.") - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = Hyperparameters.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: int - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # 
validate data type: float - try: - instance.anyof_schema_3_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: object - try: - instance.anyof_schema_4_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in Hyperparameters with anyOf schemas: float, int, object, str. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into int - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into float - try: - # validation - instance.anyof_schema_3_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_3_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into object - try: - # validation - instance.anyof_schema_4_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_4_validator - return instance - 
except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into Hyperparameters with anyOf schemas: float, int, object, str. Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], float, int, object, str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/launch/api_client/models/list_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/models/list_docker_image_batch_job_bundle_v1_response.py deleted file mode 100644 index 0b746491..00000000 --- a/launch/api_client/models/list_docker_image_batch_job_bundle_v1_response.py +++ /dev/null @@ -1,103 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) - - -class ListDockerImageBatchJobBundleV1Response(BaseModel): - """ - ListDockerImageBatchJobBundleV1Response - """ # noqa: E501 - - docker_image_batch_job_bundles: List[DockerImageBatchJobBundleV1Response] - __properties: ClassVar[List[str]] = ["docker_image_batch_job_bundles"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListDockerImageBatchJobBundleV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in docker_image_batch_job_bundles (list) - _items = [] - if self.docker_image_batch_job_bundles: - for _item in self.docker_image_batch_job_bundles: - if _item: - _items.append(_item.to_dict()) - _dict["docker_image_batch_job_bundles"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListDockerImageBatchJobBundleV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "docker_image_batch_job_bundles": [ - DockerImageBatchJobBundleV1Response.from_dict(_item) - for _item in obj["docker_image_batch_job_bundles"] - ] - if obj.get("docker_image_batch_job_bundles") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/list_docker_image_batch_jobs_v1_response.py b/launch/api_client/models/list_docker_image_batch_jobs_v1_response.py deleted file mode 100644 index 18577b41..00000000 --- a/launch/api_client/models/list_docker_image_batch_jobs_v1_response.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.docker_image_batch_job import DockerImageBatchJob - - -class ListDockerImageBatchJobsV1Response(BaseModel): - """ - ListDockerImageBatchJobsV1Response - """ # noqa: E501 - - jobs: List[DockerImageBatchJob] - __properties: ClassVar[List[str]] = ["jobs"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListDockerImageBatchJobsV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in jobs (list) - _items = [] - if self.jobs: - for _item in self.jobs: - if _item: - _items.append(_item.to_dict()) - _dict["jobs"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListDockerImageBatchJobsV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "jobs": [DockerImageBatchJob.from_dict(_item) for _item in obj["jobs"]] - if obj.get("jobs") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/list_files_response.py b/launch/api_client/models/list_files_response.py deleted file mode 100644 index 64adfd3a..00000000 --- a/launch/api_client/models/list_files_response.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field -from typing_extensions import Self - -from launch.api_client.models.get_file_response import GetFileResponse - - -class ListFilesResponse(BaseModel): - """ - Response object for listing files. 
- """ # noqa: E501 - - files: List[GetFileResponse] = Field(description="List of file IDs, names, and sizes.") - __properties: ClassVar[List[str]] = ["files"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListFilesResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in files (list) - _items = [] - if self.files: - for _item in self.files: - if _item: - _items.append(_item.to_dict()) - _dict["files"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListFilesResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "files": [GetFileResponse.from_dict(_item) for _item in obj["files"]] - if obj.get("files") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/list_fine_tunes_response.py b/launch/api_client/models/list_fine_tunes_response.py deleted file mode 100644 index 0ed64af2..00000000 --- a/launch/api_client/models/list_fine_tunes_response.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.get_fine_tune_response import GetFineTuneResponse - - -class ListFineTunesResponse(BaseModel): - """ - ListFineTunesResponse - """ # noqa: E501 - - jobs: List[GetFineTuneResponse] - __properties: ClassVar[List[str]] = ["jobs"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListFineTunesResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in jobs (list) - _items = [] - if self.jobs: - for _item in self.jobs: - if _item: - _items.append(_item.to_dict()) - _dict["jobs"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListFineTunesResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "jobs": [GetFineTuneResponse.from_dict(_item) for _item in obj["jobs"]] - if obj.get("jobs") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/list_llm_model_endpoints_v1_response.py b/launch/api_client/models/list_llm_model_endpoints_v1_response.py deleted file mode 100644 index 89aa5e8b..00000000 --- a/launch/api_client/models/list_llm_model_endpoints_v1_response.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.get_llm_model_endpoint_v1_response import ( - GetLLMModelEndpointV1Response, -) - - -class ListLLMModelEndpointsV1Response(BaseModel): - """ - ListLLMModelEndpointsV1Response - """ # noqa: E501 - - model_endpoints: List[GetLLMModelEndpointV1Response] - __properties: ClassVar[List[str]] = ["model_endpoints"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListLLMModelEndpointsV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in model_endpoints (list) - _items = [] - if self.model_endpoints: - for _item in self.model_endpoints: - if _item: - _items.append(_item.to_dict()) - _dict["model_endpoints"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListLLMModelEndpointsV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "model_endpoints": [GetLLMModelEndpointV1Response.from_dict(_item) for _item in obj["model_endpoints"]] - if obj.get("model_endpoints") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/list_model_bundles_v1_response.py b/launch/api_client/models/list_model_bundles_v1_response.py deleted file mode 100644 index 81252447..00000000 --- a/launch/api_client/models/list_model_bundles_v1_response.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.model_bundle_v1_response import ( - ModelBundleV1Response, -) - - -class ListModelBundlesV1Response(BaseModel): - """ - Response object for listing Model Bundles. 
- """ # noqa: E501 - - model_bundles: List[ModelBundleV1Response] - __properties: ClassVar[List[str]] = ["model_bundles"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListModelBundlesV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in model_bundles (list) - _items = [] - if self.model_bundles: - for _item in self.model_bundles: - if _item: - _items.append(_item.to_dict()) - _dict["model_bundles"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListModelBundlesV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "model_bundles": [ModelBundleV1Response.from_dict(_item) for _item in obj["model_bundles"]] - if obj.get("model_bundles") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/list_model_bundles_v2_response.py b/launch/api_client/models/list_model_bundles_v2_response.py deleted file mode 100644 index c2d94094..00000000 --- a/launch/api_client/models/list_model_bundles_v2_response.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.model_bundle_v2_response import ( - ModelBundleV2Response, -) - - -class ListModelBundlesV2Response(BaseModel): - """ - Response object for listing Model Bundles. 
- """ # noqa: E501 - - model_bundles: List[ModelBundleV2Response] - __properties: ClassVar[List[str]] = ["model_bundles"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListModelBundlesV2Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in model_bundles (list) - _items = [] - if self.model_bundles: - for _item in self.model_bundles: - if _item: - _items.append(_item.to_dict()) - _dict["model_bundles"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListModelBundlesV2Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "model_bundles": [ModelBundleV2Response.from_dict(_item) for _item in obj["model_bundles"]] - if obj.get("model_bundles") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/list_model_endpoints_v1_response.py b/launch/api_client/models/list_model_endpoints_v1_response.py deleted file mode 100644 index 25b8e534..00000000 --- a/launch/api_client/models/list_model_endpoints_v1_response.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) - - -class ListModelEndpointsV1Response(BaseModel): - """ - ListModelEndpointsV1Response - """ # noqa: E501 - - model_endpoints: List[GetModelEndpointV1Response] - __properties: ClassVar[List[str]] = ["model_endpoints"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListModelEndpointsV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in model_endpoints (list) - _items = [] - if self.model_endpoints: - for _item in self.model_endpoints: - if _item: - _items.append(_item.to_dict()) - _dict["model_endpoints"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListModelEndpointsV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "model_endpoints": [GetModelEndpointV1Response.from_dict(_item) for _item in obj["model_endpoints"]] - if obj.get("model_endpoints") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/list_triggers_v1_response.py b/launch/api_client/models/list_triggers_v1_response.py deleted file mode 100644 index 884644ed..00000000 --- a/launch/api_client/models/list_triggers_v1_response.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from launch.api_client.models.get_trigger_v1_response import ( - GetTriggerV1Response, -) - - -class ListTriggersV1Response(BaseModel): - """ - ListTriggersV1Response - """ # noqa: E501 - - triggers: List[GetTriggerV1Response] - __properties: ClassVar[List[str]] = ["triggers"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListTriggersV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in triggers (list) - _items = [] - if self.triggers: - for _item in self.triggers: - if _item: - _items.append(_item.to_dict()) - _dict["triggers"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListTriggersV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "triggers": [GetTriggerV1Response.from_dict(_item) for _item in obj["triggers"]] - if obj.get("triggers") is not None - else None - } - ) - return _obj diff --git a/launch/api_client/models/llm_fine_tune_event.py b/launch/api_client/models/llm_fine_tune_event.py deleted file mode 100644 index 86a9a174..00000000 --- a/launch/api_client/models/llm_fine_tune_event.py +++ /dev/null @@ -1,87 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr -from typing_extensions import Self - - -class LLMFineTuneEvent(BaseModel): - """ - LLMFineTuneEvent - """ # noqa: E501 - - level: StrictStr - message: StrictStr - timestamp: Optional[Union[StrictFloat, StrictInt]] = None - __properties: ClassVar[List[str]] = ["level", "message", "timestamp"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of LLMFineTuneEvent from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of LLMFineTuneEvent from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"level": obj.get("level"), "message": obj.get("message"), "timestamp": obj.get("timestamp")} - ) - return _obj diff --git a/launch/api_client/models/llm_inference_framework.py b/launch/api_client/models/llm_inference_framework.py deleted file mode 100644 index 14b45552..00000000 --- a/launch/api_client/models/llm_inference_framework.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class LLMInferenceFramework(str, Enum): - """ - An enumeration. 
- """ - - """ - allowed enum values - """ - DEEPSPEED = "deepspeed" - TEXT_GENERATION_INFERENCE = "text_generation_inference" - VLLM = "vllm" - LIGHTLLM = "lightllm" - TENSORRT_LLM = "tensorrt_llm" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of LLMInferenceFramework from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/llm_source.py b/launch/api_client/models/llm_source.py deleted file mode 100644 index 8ddbfe7b..00000000 --- a/launch/api_client/models/llm_source.py +++ /dev/null @@ -1,36 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class LLMSource(str, Enum): - """ - An enumeration. - """ - - """ - allowed enum values - """ - HUGGING_FACE = "hugging_face" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of LLMSource from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/memory.py b/launch/api_client/models/memory.py deleted file mode 100644 index 0292256a..00000000 --- a/launch/api_client/models/memory.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from inspect import getfullargspec -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictFloat, - StrictInt, - StrictStr, - ValidationError, - field_validator, -) -from typing_extensions import Literal, Self - -MEMORY_ANY_OF_SCHEMAS = ["float", "int", "str"] - - -class Memory(BaseModel): - """ - Memory - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: int - anyof_schema_2_validator: Optional[StrictInt] = None - # data type: float - anyof_schema_3_validator: Optional[Union[StrictFloat, StrictInt]] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[float, int, str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"float", "int", "str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`") - if kwargs: - raise ValueError("If a position argument is used, keyword arguments cannot be used.") - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = Memory.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: int - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: float - try: - instance.anyof_schema_3_validator = v - return v - except (ValidationError, ValueError) as e: - 
error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in Memory with anyOf schemas: float, int, str. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into int - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into float - try: - # validation - instance.anyof_schema_3_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_3_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into Memory with anyOf schemas: float, int, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], float, int, str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/launch/api_client/models/model_bundle_environment_params.py b/launch/api_client/models/model_bundle_environment_params.py deleted file mode 100644 index dc82b529..00000000 --- a/launch/api_client/models/model_bundle_environment_params.py +++ /dev/null @@ -1,105 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.model_bundle_framework_type import ( - ModelBundleFrameworkType, -) - - -class ModelBundleEnvironmentParams(BaseModel): - """ - This is the entity-layer class for the Model Bundle environment parameters. 
Being an entity-layer class, it should be a plain data object. - """ # noqa: E501 - - ecr_repo: Optional[StrictStr] = None - framework_type: ModelBundleFrameworkType - image_tag: Optional[StrictStr] = None - pytorch_image_tag: Optional[StrictStr] = None - tensorflow_version: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [ - "ecr_repo", - "framework_type", - "image_tag", - "pytorch_image_tag", - "tensorflow_version", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ModelBundleEnvironmentParams from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ModelBundleEnvironmentParams from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "ecr_repo": obj.get("ecr_repo"), - "framework_type": obj.get("framework_type"), - "image_tag": obj.get("image_tag"), - "pytorch_image_tag": obj.get("pytorch_image_tag"), - "tensorflow_version": obj.get("tensorflow_version"), - } - ) - return _obj diff --git a/launch/api_client/models/model_bundle_framework_type.py b/launch/api_client/models/model_bundle_framework_type.py deleted file mode 100644 index e585ec87..00000000 --- a/launch/api_client/models/model_bundle_framework_type.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class ModelBundleFrameworkType(str, Enum): - """ - The canonical list of possible machine learning frameworks of Model Bundles. 
- """ - - """ - allowed enum values - """ - PYTORCH = "pytorch" - TENSORFLOW = "tensorflow" - CUSTOM_BASE_IMAGE = "custom_base_image" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of ModelBundleFrameworkType from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/model_bundle_order_by.py b/launch/api_client/models/model_bundle_order_by.py deleted file mode 100644 index a6a51a1d..00000000 --- a/launch/api_client/models/model_bundle_order_by.py +++ /dev/null @@ -1,37 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class ModelBundleOrderBy(str, Enum): - """ - The canonical list of possible orderings of Model Bundles. - """ - - """ - allowed enum values - """ - NEWEST = "newest" - OLDEST = "oldest" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of ModelBundleOrderBy from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/model_bundle_packaging_type.py b/launch/api_client/models/model_bundle_packaging_type.py deleted file mode 100644 index fad38cf3..00000000 --- a/launch/api_client/models/model_bundle_packaging_type.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class ModelBundlePackagingType(str, Enum): - """ - The canonical list of possible packaging types for Model Bundles. These values broadly determine how the model endpoint will obtain its code & dependencies. - """ - - """ - allowed enum values - """ - CLOUDPICKLE = "cloudpickle" - ZIP = "zip" - LIRA = "lira" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of ModelBundlePackagingType from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/model_bundle_v1_response.py b/launch/api_client/models/model_bundle_v1_response.py deleted file mode 100644 index b740e5bf..00000000 --- a/launch/api_client/models/model_bundle_v1_response.py +++ /dev/null @@ -1,132 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from datetime import datetime -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.model_bundle_environment_params import ( - ModelBundleEnvironmentParams, -) -from launch.api_client.models.model_bundle_packaging_type import ( - ModelBundlePackagingType, -) - - -class ModelBundleV1Response(BaseModel): - """ - Response object for a single Model Bundle. 
- """ # noqa: E501 - - app_config: Optional[Dict[str, Any]] = None - created_at: datetime - env_params: ModelBundleEnvironmentParams - id: StrictStr - location: StrictStr - metadata: Dict[str, Any] - model_artifact_ids: List[StrictStr] - name: StrictStr - packaging_type: ModelBundlePackagingType - requirements: List[StrictStr] - schema_location: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [ - "app_config", - "created_at", - "env_params", - "id", - "location", - "metadata", - "model_artifact_ids", - "name", - "packaging_type", - "requirements", - "schema_location", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ModelBundleV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of env_params - if self.env_params: - _dict["env_params"] = self.env_params.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ModelBundleV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "app_config": obj.get("app_config"), - "created_at": obj.get("created_at"), - "env_params": ModelBundleEnvironmentParams.from_dict(obj["env_params"]) - if obj.get("env_params") is not None - else None, - "id": obj.get("id"), - "location": obj.get("location"), - "metadata": obj.get("metadata"), - "model_artifact_ids": obj.get("model_artifact_ids"), - "name": obj.get("name"), - "packaging_type": obj.get("packaging_type"), - "requirements": obj.get("requirements"), - "schema_location": obj.get("schema_location"), - } - ) - return _obj diff --git a/launch/api_client/models/model_bundle_v2_response.py b/launch/api_client/models/model_bundle_v2_response.py deleted file mode 100644 index b2ad8bff..00000000 --- a/launch/api_client/models/model_bundle_v2_response.py +++ /dev/null @@ -1,113 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from datetime import datetime -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.flavor import Flavor - - -class ModelBundleV2Response(BaseModel): - """ - Response object for a single Model Bundle. - """ # noqa: E501 - - created_at: datetime - flavor: Flavor - id: StrictStr - metadata: Dict[str, Any] - model_artifact_ids: List[StrictStr] - name: StrictStr - schema_location: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [ - "created_at", - "flavor", - "id", - "metadata", - "model_artifact_ids", - "name", - "schema_location", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ModelBundleV2Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of flavor - if self.flavor: - _dict["flavor"] = self.flavor.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ModelBundleV2Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "created_at": obj.get("created_at"), - "flavor": Flavor.from_dict(obj["flavor"]) if obj.get("flavor") is not None else None, - "id": obj.get("id"), - "metadata": obj.get("metadata"), - "model_artifact_ids": obj.get("model_artifact_ids"), - "name": obj.get("name"), - "schema_location": obj.get("schema_location"), - } - ) - return _obj diff --git a/launch/api_client/models/model_download_request.py b/launch/api_client/models/model_download_request.py deleted file mode 100644 index f867063a..00000000 --- a/launch/api_client/models/model_download_request.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing_extensions import Self - - -class ModelDownloadRequest(BaseModel): - """ - ModelDownloadRequest - """ # noqa: E501 - - download_format: Optional[StrictStr] = Field( - default="hugging_face", - description="Format that you want the downloaded urls to be compatible with. 
Currently only supports hugging_face", - ) - model_name: StrictStr = Field(description="Name of the fine tuned model") - __properties: ClassVar[List[str]] = ["download_format", "model_name"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ModelDownloadRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ModelDownloadRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "download_format": obj.get("download_format") - if obj.get("download_format") is not None - else "hugging_face", - "model_name": obj.get("model_name"), - } - ) - return _obj diff --git a/launch/api_client/models/model_download_response.py b/launch/api_client/models/model_download_response.py deleted file mode 100644 index 578931a2..00000000 --- a/launch/api_client/models/model_download_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing_extensions import Self - - -class ModelDownloadResponse(BaseModel): - """ - ModelDownloadResponse - """ # noqa: E501 - - urls: Dict[str, StrictStr] = Field(description="Dictionary of (file_name, url) pairs to download the model from.") - __properties: ClassVar[List[str]] = ["urls"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ModelDownloadResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ModelDownloadResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"urls": obj.get("urls")}) - return _obj diff --git a/launch/api_client/models/model_endpoint_deployment_state.py b/launch/api_client/models/model_endpoint_deployment_state.py deleted file mode 100644 index 9092d9a1..00000000 --- a/launch/api_client/models/model_endpoint_deployment_state.py +++ /dev/null @@ -1,101 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictInt -from typing_extensions import Annotated, Self - - -class ModelEndpointDeploymentState(BaseModel): - """ - This is the entity-layer class for the deployment settings related to a Model Endpoint. 
- """ # noqa: E501 - - available_workers: Optional[Annotated[int, Field(strict=True, ge=0)]] = None - max_workers: Annotated[int, Field(strict=True, ge=0)] - min_workers: Annotated[int, Field(strict=True, ge=0)] - per_worker: StrictInt - unavailable_workers: Optional[Annotated[int, Field(strict=True, ge=0)]] = None - __properties: ClassVar[List[str]] = [ - "available_workers", - "max_workers", - "min_workers", - "per_worker", - "unavailable_workers", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ModelEndpointDeploymentState from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ModelEndpointDeploymentState from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "available_workers": obj.get("available_workers"), - "max_workers": obj.get("max_workers"), - "min_workers": obj.get("min_workers"), - "per_worker": obj.get("per_worker"), - "unavailable_workers": obj.get("unavailable_workers"), - } - ) - return _obj diff --git a/launch/api_client/models/model_endpoint_order_by.py b/launch/api_client/models/model_endpoint_order_by.py deleted file mode 100644 index d6ba3265..00000000 --- a/launch/api_client/models/model_endpoint_order_by.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class ModelEndpointOrderBy(str, Enum): - """ - The canonical list of possible orderings of Model Bundles. 
- """ - - """ - allowed enum values - """ - NEWEST = "newest" - OLDEST = "oldest" - ALPHABETICAL = "alphabetical" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of ModelEndpointOrderBy from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/model_endpoint_resource_state.py b/launch/api_client/models/model_endpoint_resource_state.py deleted file mode 100644 index a9544dac..00000000 --- a/launch/api_client/models/model_endpoint_resource_state.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictBool -from typing_extensions import Annotated, Self - -from launch.api_client.models.cpus import Cpus -from launch.api_client.models.gpu_type import GpuType -from launch.api_client.models.memory import Memory -from launch.api_client.models.storage import Storage - - -class ModelEndpointResourceState(BaseModel): - """ - This is the entity-layer class for the resource settings per worker of a Model Endpoint. 
- """ # noqa: E501 - - cpus: Cpus - gpu_type: Optional[GpuType] = None - gpus: Annotated[int, Field(strict=True, ge=0)] - memory: Memory - optimize_costs: Optional[StrictBool] = None - storage: Optional[Storage] = None - __properties: ClassVar[List[str]] = ["cpus", "gpu_type", "gpus", "memory", "optimize_costs", "storage"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ModelEndpointResourceState from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of cpus - if self.cpus: - _dict["cpus"] = self.cpus.to_dict() - # override the default output from pydantic by calling `to_dict()` of memory - if self.memory: - _dict["memory"] = self.memory.to_dict() - # override the default output from pydantic by calling `to_dict()` of storage - if self.storage: - _dict["storage"] = self.storage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ModelEndpointResourceState from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "cpus": Cpus.from_dict(obj["cpus"]) if obj.get("cpus") is not None else None, - "gpu_type": obj.get("gpu_type"), - "gpus": obj.get("gpus"), - "memory": Memory.from_dict(obj["memory"]) if obj.get("memory") is not None else None, - "optimize_costs": obj.get("optimize_costs"), - "storage": Storage.from_dict(obj["storage"]) if obj.get("storage") is not None else None, - } - ) - return _obj diff --git a/launch/api_client/models/model_endpoint_status.py b/launch/api_client/models/model_endpoint_status.py deleted file mode 100644 index a83676e2..00000000 --- a/launch/api_client/models/model_endpoint_status.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class ModelEndpointStatus(str, Enum): - """ - An enumeration. 
- """ - - """ - allowed enum values - """ - READY = "READY" - UPDATE_PENDING = "UPDATE_PENDING" - UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS" - UPDATE_FAILED = "UPDATE_FAILED" - DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of ModelEndpointStatus from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/model_endpoint_type.py b/launch/api_client/models/model_endpoint_type.py deleted file mode 100644 index 267c80c5..00000000 --- a/launch/api_client/models/model_endpoint_type.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class ModelEndpointType(str, Enum): - """ - An enumeration. - """ - - """ - allowed enum values - """ - ASYNC = "async" - SYNC = "sync" - STREAMING = "streaming" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of ModelEndpointType from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/pytorch_framework.py b/launch/api_client/models/pytorch_framework.py deleted file mode 100644 index 1b291e53..00000000 --- a/launch/api_client/models/pytorch_framework.py +++ /dev/null @@ -1,93 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing_extensions import Self - - -class PytorchFramework(BaseModel): - """ - This is the entity-layer class for a Pytorch framework specification. - """ # noqa: E501 - - framework_type: StrictStr - pytorch_image_tag: StrictStr - __properties: ClassVar[List[str]] = ["framework_type", "pytorch_image_tag"] - - @field_validator("framework_type") - def framework_type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["pytorch"]): - raise ValueError("must be one of enum values ('pytorch')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of PytorchFramework from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of PytorchFramework from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"framework_type": obj.get("framework_type"), "pytorch_image_tag": obj.get("pytorch_image_tag")} - ) - return _obj diff --git a/launch/api_client/models/quantization.py b/launch/api_client/models/quantization.py deleted file mode 100644 index 75c54cc7..00000000 --- a/launch/api_client/models/quantization.py +++ /dev/null @@ -1,37 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class Quantization(str, Enum): - """ - An enumeration. 
- """ - - """ - allowed enum values - """ - BITSANDBYTES = "bitsandbytes" - AWQ = "awq" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of Quantization from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/runnable_image_flavor.py b/launch/api_client/models/runnable_image_flavor.py deleted file mode 100644 index f3ee05c5..00000000 --- a/launch/api_client/models/runnable_image_flavor.py +++ /dev/null @@ -1,137 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import ( - BaseModel, - ConfigDict, - StrictInt, - StrictStr, - field_validator, -) -from typing_extensions import Self - - -class RunnableImageFlavor(BaseModel): - """ - This is the entity-layer class for the Model Bundle flavor of a runnable image. 
- """ # noqa: E501 - - command: List[StrictStr] - env: Optional[Dict[str, StrictStr]] = None - flavor: StrictStr - healthcheck_route: Optional[StrictStr] = "/readyz" - predict_route: Optional[StrictStr] = "/predict" - protocol: StrictStr - readiness_initial_delay_seconds: Optional[StrictInt] = 120 - repository: StrictStr - tag: StrictStr - __properties: ClassVar[List[str]] = [ - "command", - "env", - "flavor", - "healthcheck_route", - "predict_route", - "protocol", - "readiness_initial_delay_seconds", - "repository", - "tag", - ] - - @field_validator("flavor") - def flavor_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["runnable_image"]): - raise ValueError("must be one of enum values ('runnable_image')") - return value - - @field_validator("protocol") - def protocol_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["http"]): - raise ValueError("must be one of enum values ('http')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of RunnableImageFlavor from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of RunnableImageFlavor from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "command": obj.get("command"), - "env": obj.get("env"), - "flavor": obj.get("flavor"), - "healthcheck_route": obj.get("healthcheck_route") - if obj.get("healthcheck_route") is not None - else "/readyz", - "predict_route": obj.get("predict_route") if obj.get("predict_route") is not None else "/predict", - "protocol": obj.get("protocol"), - "readiness_initial_delay_seconds": obj.get("readiness_initial_delay_seconds") - if obj.get("readiness_initial_delay_seconds") is not None - else 120, - "repository": obj.get("repository"), - "tag": obj.get("tag"), - } - ) - return _obj diff --git a/launch/api_client/models/storage.py b/launch/api_client/models/storage.py deleted file mode 100644 index 97edb8bd..00000000 --- a/launch/api_client/models/storage.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from inspect import getfullargspec -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictFloat, - StrictInt, - StrictStr, - ValidationError, - field_validator, -) -from typing_extensions import Literal, Self - -STORAGE_ANY_OF_SCHEMAS = ["float", "int", "str"] - - -class Storage(BaseModel): - """ - Storage - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: int - anyof_schema_2_validator: Optional[StrictInt] = None - # data type: float - anyof_schema_3_validator: Optional[Union[StrictFloat, StrictInt]] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[float, int, str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"float", "int", "str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`") - if kwargs: - raise ValueError("If a position argument is used, keyword arguments cannot be used.") - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = Storage.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: int - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: float - try: - instance.anyof_schema_3_validator = v - return v - except (ValidationError, ValueError) as e: - 
error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in Storage with anyOf schemas: float, int, str. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into int - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into float - try: - # validation - instance.anyof_schema_3_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_3_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into Storage with anyOf schemas: float, int, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], float, int, str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/launch/api_client/models/stream_error.py b/launch/api_client/models/stream_error.py deleted file mode 100644 index a161299c..00000000 --- a/launch/api_client/models/stream_error.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictInt -from typing_extensions import Self - -from launch.api_client.models.stream_error_content import StreamErrorContent - - -class StreamError(BaseModel): - """ - Error object for a stream prompt completion task. 
- """ # noqa: E501 - - content: StreamErrorContent - status_code: StrictInt - __properties: ClassVar[List[str]] = ["content", "status_code"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of StreamError from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of content - if self.content: - _dict["content"] = self.content.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of StreamError from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "content": StreamErrorContent.from_dict(obj["content"]) if obj.get("content") is not None else None, - "status_code": obj.get("status_code"), - } - ) - return _obj diff --git a/launch/api_client/models/stream_error_content.py b/launch/api_client/models/stream_error_content.py deleted file mode 100644 index 4aff6fe6..00000000 --- a/launch/api_client/models/stream_error_content.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class StreamErrorContent(BaseModel): - """ - StreamErrorContent - """ # noqa: E501 - - error: StrictStr - timestamp: StrictStr - __properties: ClassVar[List[str]] = ["error", "timestamp"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of StreamErrorContent from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of StreamErrorContent from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"error": obj.get("error"), "timestamp": obj.get("timestamp")}) - return _obj diff --git a/launch/api_client/models/streaming_enhanced_runnable_image_flavor.py b/launch/api_client/models/streaming_enhanced_runnable_image_flavor.py deleted file mode 100644 index 3857ebbe..00000000 --- a/launch/api_client/models/streaming_enhanced_runnable_image_flavor.py +++ /dev/null @@ -1,145 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import ( - BaseModel, - ConfigDict, - StrictInt, - StrictStr, - field_validator, -) -from typing_extensions import Self - - -class StreamingEnhancedRunnableImageFlavor(BaseModel): - """ - For deployments that expose a streaming route in a container. 
- """ # noqa: E501 - - command: Optional[List[StrictStr]] = None - env: Optional[Dict[str, StrictStr]] = None - flavor: StrictStr - healthcheck_route: Optional[StrictStr] = "/readyz" - predict_route: Optional[StrictStr] = "/predict" - protocol: StrictStr - readiness_initial_delay_seconds: Optional[StrictInt] = 120 - repository: StrictStr - streaming_command: List[StrictStr] - streaming_predict_route: Optional[StrictStr] = "/stream" - tag: StrictStr - __properties: ClassVar[List[str]] = [ - "command", - "env", - "flavor", - "healthcheck_route", - "predict_route", - "protocol", - "readiness_initial_delay_seconds", - "repository", - "streaming_command", - "streaming_predict_route", - "tag", - ] - - @field_validator("flavor") - def flavor_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["streaming_enhanced_runnable_image"]): - raise ValueError("must be one of enum values ('streaming_enhanced_runnable_image')") - return value - - @field_validator("protocol") - def protocol_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["http"]): - raise ValueError("must be one of enum values ('http')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of StreamingEnhancedRunnableImageFlavor from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of StreamingEnhancedRunnableImageFlavor from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "command": obj.get("command"), - "env": obj.get("env"), - "flavor": obj.get("flavor"), - "healthcheck_route": obj.get("healthcheck_route") - if obj.get("healthcheck_route") is not None - else "/readyz", - "predict_route": obj.get("predict_route") if obj.get("predict_route") is not None else "/predict", - "protocol": obj.get("protocol"), - "readiness_initial_delay_seconds": obj.get("readiness_initial_delay_seconds") - if obj.get("readiness_initial_delay_seconds") is not None - else 120, - "repository": obj.get("repository"), - "streaming_command": obj.get("streaming_command"), - "streaming_predict_route": obj.get("streaming_predict_route") - if obj.get("streaming_predict_route") is not None - else "/stream", - "tag": obj.get("tag"), - } - ) - return _obj diff --git a/launch/api_client/models/sync_endpoint_predict_v1_request.py b/launch/api_client/models/sync_endpoint_predict_v1_request.py deleted file mode 100644 index 17b4bf42..00000000 --- a/launch/api_client/models/sync_endpoint_predict_v1_request.py +++ /dev/null @@ -1,130 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator 
(https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictFloat, - StrictInt, - StrictStr, -) -from typing_extensions import Annotated, Self - -from launch.api_client.models.callback_auth import CallbackAuth - - -class SyncEndpointPredictV1Request(BaseModel): - """ - SyncEndpointPredictV1Request - """ # noqa: E501 - - args: Optional[Any] = None - callback_auth: Optional[CallbackAuth] = None - callback_url: Optional[StrictStr] = None - cloudpickle: Optional[StrictStr] = None - num_retries: Optional[Annotated[int, Field(strict=True, ge=0)]] = None - return_pickled: Optional[StrictBool] = False - timeout_seconds: Optional[Union[StrictFloat, StrictInt]] = None - url: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "num_retries", - "return_pickled", - "timeout_seconds", - "url", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of SyncEndpointPredictV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of callback_auth - if self.callback_auth: - _dict["callback_auth"] = self.callback_auth.to_dict() - # set to None if args (nullable) is None - # and model_fields_set contains the field - if self.args is None and "args" in self.model_fields_set: - _dict["args"] = None - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of SyncEndpointPredictV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "args": obj.get("args"), - "callback_auth": CallbackAuth.from_dict(obj["callback_auth"]) - if obj.get("callback_auth") is not None - else None, - "callback_url": obj.get("callback_url"), - "cloudpickle": obj.get("cloudpickle"), - "num_retries": obj.get("num_retries"), - "return_pickled": obj.get("return_pickled") if obj.get("return_pickled") is not None else False, - "timeout_seconds": obj.get("timeout_seconds"), - "url": obj.get("url"), - } - ) - return _obj diff --git a/launch/api_client/models/sync_endpoint_predict_v1_response.py b/launch/api_client/models/sync_endpoint_predict_v1_response.py deleted file mode 100644 index 7c1051d0..00000000 --- a/launch/api_client/models/sync_endpoint_predict_v1_response.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated 
by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.task_status import TaskStatus - - -class SyncEndpointPredictV1Response(BaseModel): - """ - SyncEndpointPredictV1Response - """ # noqa: E501 - - result: Optional[Any] = None - status: TaskStatus - traceback: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["result", "status", "traceback"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of SyncEndpointPredictV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # set to None if result (nullable) is None - # and model_fields_set contains the field - if self.result is None and "result" in self.model_fields_set: - _dict["result"] = None - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of SyncEndpointPredictV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"result": obj.get("result"), "status": obj.get("status"), "traceback": obj.get("traceback")} - ) - return _obj diff --git a/launch/api_client/models/task_status.py b/launch/api_client/models/task_status.py deleted file mode 100644 index 7e8d91e4..00000000 --- a/launch/api_client/models/task_status.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -from enum import Enum - -from typing_extensions import Self - - -class TaskStatus(str, Enum): - """ - An enumeration. 
- """ - - """ - allowed enum values - """ - PENDING = "PENDING" - STARTED = "STARTED" - SUCCESS = "SUCCESS" - FAILURE = "FAILURE" - UNDEFINED = "UNDEFINED" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of TaskStatus from a JSON string""" - return cls(json.loads(json_str)) diff --git a/launch/api_client/models/tensorflow_framework.py b/launch/api_client/models/tensorflow_framework.py deleted file mode 100644 index b1d19110..00000000 --- a/launch/api_client/models/tensorflow_framework.py +++ /dev/null @@ -1,93 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing_extensions import Self - - -class TensorflowFramework(BaseModel): - """ - This is the entity-layer class for a Tensorflow framework specification. 
- """ # noqa: E501 - - framework_type: StrictStr - tensorflow_version: StrictStr - __properties: ClassVar[List[str]] = ["framework_type", "tensorflow_version"] - - @field_validator("framework_type") - def framework_type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["tensorflow"]): - raise ValueError("must be one of enum values ('tensorflow')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of TensorflowFramework from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of TensorflowFramework from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"framework_type": obj.get("framework_type"), "tensorflow_version": obj.get("tensorflow_version")} - ) - return _obj diff --git a/launch/api_client/models/token_output.py b/launch/api_client/models/token_output.py deleted file mode 100644 index b6274cfb..00000000 --- a/launch/api_client/models/token_output.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr -from typing_extensions import Self - - -class TokenOutput(BaseModel): - """ - TokenOutput - """ # noqa: E501 - - log_prob: Union[StrictFloat, StrictInt] - token: StrictStr - __properties: ClassVar[List[str]] = ["log_prob", "token"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of TokenOutput from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of TokenOutput from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"log_prob": obj.get("log_prob"), "token": obj.get("token")}) - return _obj diff --git a/launch/api_client/models/tool_config.py b/launch/api_client/models/tool_config.py deleted file mode 100644 index 4a8df6bd..00000000 --- a/launch/api_client/models/tool_config.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool, StrictInt, StrictStr -from typing_extensions import Self - - -class ToolConfig(BaseModel): - """ - Configuration for tool use. NOTE: this config is highly experimental and signature will change significantly in future iterations. 
- """ # noqa: E501 - - execution_timeout_seconds: Optional[StrictInt] = 60 - max_iterations: Optional[StrictInt] = 10 - name: StrictStr - should_retry_on_error: Optional[StrictBool] = True - __properties: ClassVar[List[str]] = ["execution_timeout_seconds", "max_iterations", "name", "should_retry_on_error"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ToolConfig from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ToolConfig from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "execution_timeout_seconds": obj.get("execution_timeout_seconds") - if obj.get("execution_timeout_seconds") is not None - else 60, - "max_iterations": obj.get("max_iterations") if obj.get("max_iterations") is not None else 10, - "name": obj.get("name"), - "should_retry_on_error": obj.get("should_retry_on_error") - if obj.get("should_retry_on_error") is not None - else True, - } - ) - return _obj diff --git a/launch/api_client/models/triton_enhanced_runnable_image_flavor.py b/launch/api_client/models/triton_enhanced_runnable_image_flavor.py deleted file mode 100644 index acce6e9a..00000000 --- a/launch/api_client/models/triton_enhanced_runnable_image_flavor.py +++ /dev/null @@ -1,161 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - StrictFloat, - StrictInt, - StrictStr, - field_validator, -) -from typing_extensions import Self - - -class TritonEnhancedRunnableImageFlavor(BaseModel): - """ - For deployments that require tritonserver running in a container. 
- """ # noqa: E501 - - command: List[StrictStr] - env: Optional[Dict[str, StrictStr]] = None - flavor: StrictStr - healthcheck_route: Optional[StrictStr] = "/readyz" - predict_route: Optional[StrictStr] = "/predict" - protocol: StrictStr - readiness_initial_delay_seconds: Optional[StrictInt] = 120 - repository: StrictStr - tag: StrictStr - triton_commit_tag: StrictStr - triton_memory: Optional[StrictStr] = None - triton_model_replicas: Optional[Dict[str, StrictStr]] = None - triton_model_repository: StrictStr - triton_num_cpu: Union[StrictFloat, StrictInt] - triton_readiness_initial_delay_seconds: Optional[StrictInt] = 300 - triton_storage: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [ - "command", - "env", - "flavor", - "healthcheck_route", - "predict_route", - "protocol", - "readiness_initial_delay_seconds", - "repository", - "tag", - "triton_commit_tag", - "triton_memory", - "triton_model_replicas", - "triton_model_repository", - "triton_num_cpu", - "triton_readiness_initial_delay_seconds", - "triton_storage", - ] - - @field_validator("flavor") - def flavor_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["triton_enhanced_runnable_image"]): - raise ValueError("must be one of enum values ('triton_enhanced_runnable_image')") - return value - - @field_validator("protocol") - def protocol_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["http"]): - raise ValueError("must be one of enum values ('http')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return 
json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of TritonEnhancedRunnableImageFlavor from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of TritonEnhancedRunnableImageFlavor from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "command": obj.get("command"), - "env": obj.get("env"), - "flavor": obj.get("flavor"), - "healthcheck_route": obj.get("healthcheck_route") - if obj.get("healthcheck_route") is not None - else "/readyz", - "predict_route": obj.get("predict_route") if obj.get("predict_route") is not None else "/predict", - "protocol": obj.get("protocol"), - "readiness_initial_delay_seconds": obj.get("readiness_initial_delay_seconds") - if obj.get("readiness_initial_delay_seconds") is not None - else 120, - "repository": obj.get("repository"), - "tag": obj.get("tag"), - "triton_commit_tag": obj.get("triton_commit_tag"), - "triton_memory": obj.get("triton_memory"), - "triton_model_replicas": obj.get("triton_model_replicas"), - "triton_model_repository": obj.get("triton_model_repository"), - "triton_num_cpu": obj.get("triton_num_cpu"), - "triton_readiness_initial_delay_seconds": obj.get("triton_readiness_initial_delay_seconds") - if 
obj.get("triton_readiness_initial_delay_seconds") is not None - else 300, - "triton_storage": obj.get("triton_storage"), - } - ) - return _obj diff --git a/launch/api_client/models/update_batch_job_v1_request.py b/launch/api_client/models/update_batch_job_v1_request.py deleted file mode 100644 index 7e670fa5..00000000 --- a/launch/api_client/models/update_batch_job_v1_request.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool -from typing_extensions import Self - - -class UpdateBatchJobV1Request(BaseModel): - """ - UpdateBatchJobV1Request - """ # noqa: E501 - - cancel: StrictBool - __properties: ClassVar[List[str]] = ["cancel"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateBatchJobV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateBatchJobV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"cancel": obj.get("cancel")}) - return _obj diff --git a/launch/api_client/models/update_batch_job_v1_response.py b/launch/api_client/models/update_batch_job_v1_response.py deleted file mode 100644 index e9a04367..00000000 --- a/launch/api_client/models/update_batch_job_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool -from typing_extensions import Self - - -class UpdateBatchJobV1Response(BaseModel): - """ - UpdateBatchJobV1Response - """ # noqa: E501 - - success: StrictBool - __properties: ClassVar[List[str]] = ["success"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateBatchJobV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateBatchJobV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"success": obj.get("success")}) - return _obj diff --git a/launch/api_client/models/update_docker_image_batch_job_v1_request.py b/launch/api_client/models/update_docker_image_batch_job_v1_request.py deleted file mode 100644 index ac643694..00000000 --- a/launch/api_client/models/update_docker_image_batch_job_v1_request.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool -from typing_extensions import Self - - -class UpdateDockerImageBatchJobV1Request(BaseModel): - """ - UpdateDockerImageBatchJobV1Request - """ # noqa: E501 - - cancel: StrictBool - __properties: ClassVar[List[str]] = ["cancel"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateDockerImageBatchJobV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateDockerImageBatchJobV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"cancel": obj.get("cancel")}) - return _obj diff --git a/launch/api_client/models/update_docker_image_batch_job_v1_response.py b/launch/api_client/models/update_docker_image_batch_job_v1_response.py deleted file mode 100644 index 60c7504a..00000000 --- a/launch/api_client/models/update_docker_image_batch_job_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool -from typing_extensions import Self - - -class UpdateDockerImageBatchJobV1Response(BaseModel): - """ - UpdateDockerImageBatchJobV1Response - """ # noqa: E501 - - success: StrictBool - __properties: ClassVar[List[str]] = ["success"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateDockerImageBatchJobV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateDockerImageBatchJobV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"success": obj.get("success")}) - return _obj diff --git a/launch/api_client/models/update_llm_model_endpoint_v1_request.py b/launch/api_client/models/update_llm_model_endpoint_v1_request.py deleted file mode 100644 index d966a438..00000000 --- a/launch/api_client/models/update_llm_model_endpoint_v1_request.py +++ /dev/null @@ -1,187 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictInt, - StrictStr, -) -from typing_extensions import Annotated, Self - -from launch.api_client.models.callback_auth import CallbackAuth -from launch.api_client.models.cpus import Cpus -from launch.api_client.models.gpu_type import GpuType -from launch.api_client.models.llm_source import LLMSource -from launch.api_client.models.memory import Memory -from launch.api_client.models.quantization import Quantization -from launch.api_client.models.storage import Storage - - -class UpdateLLMModelEndpointV1Request(BaseModel): - """ - UpdateLLMModelEndpointV1Request - """ # noqa: E501 - - billing_tags: Optional[Dict[str, Any]] = None - checkpoint_path: Optional[StrictStr] = None - cpus: Optional[Cpus] = None - default_callback_auth: Optional[CallbackAuth] = None - default_callback_url: Optional[Annotated[str, Field(min_length=1, strict=True, max_length=2083)]] = None - gpu_type: Optional[GpuType] = None - gpus: Optional[StrictInt] = None - high_priority: Optional[StrictBool] = None - inference_framework_image_tag: Optional[StrictStr] = None - labels: Optional[Dict[str, StrictStr]] = None - max_workers: Optional[StrictInt] = None - memory: Optional[Memory] = None - metadata: Optional[Dict[str, Any]] = None - min_workers: Optional[StrictInt] = None - model_name: Optional[StrictStr] = None - num_shards: Optional[StrictInt] = None - optimize_costs: Optional[StrictBool] = None - per_worker: Optional[StrictInt] = None - post_inference_hooks: Optional[List[StrictStr]] = None - prewarm: Optional[StrictBool] = None - public_inference: Optional[StrictBool] = None - quantize: Optional[Quantization] = None - source: Optional[LLMSource] = None - storage: Optional[Storage] = None - __properties: ClassVar[List[str]] = [ - "billing_tags", 
- "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "inference_framework_image_tag", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_name", - "num_shards", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateLLMModelEndpointV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of cpus - if self.cpus: - _dict["cpus"] = self.cpus.to_dict() - # override the default output from pydantic by calling `to_dict()` of default_callback_auth - if self.default_callback_auth: - _dict["default_callback_auth"] = self.default_callback_auth.to_dict() - # override the default output from pydantic by calling `to_dict()` of memory - if self.memory: - _dict["memory"] = self.memory.to_dict() - # override the default output from pydantic by calling `to_dict()` of storage - if self.storage: - _dict["storage"] = self.storage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateLLMModelEndpointV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "billing_tags": obj.get("billing_tags"), - "checkpoint_path": obj.get("checkpoint_path"), - "cpus": Cpus.from_dict(obj["cpus"]) if obj.get("cpus") is not None else None, - "default_callback_auth": CallbackAuth.from_dict(obj["default_callback_auth"]) - if obj.get("default_callback_auth") is not None - else None, - "default_callback_url": obj.get("default_callback_url"), - "gpu_type": obj.get("gpu_type"), - "gpus": obj.get("gpus"), - "high_priority": obj.get("high_priority"), - "inference_framework_image_tag": obj.get("inference_framework_image_tag"), - "labels": obj.get("labels"), - "max_workers": obj.get("max_workers"), - "memory": Memory.from_dict(obj["memory"]) if obj.get("memory") is not None else None, - "metadata": obj.get("metadata"), - "min_workers": obj.get("min_workers"), - "model_name": obj.get("model_name"), - "num_shards": obj.get("num_shards"), - "optimize_costs": obj.get("optimize_costs"), - "per_worker": 
obj.get("per_worker"), - "post_inference_hooks": obj.get("post_inference_hooks"), - "prewarm": obj.get("prewarm"), - "public_inference": obj.get("public_inference"), - "quantize": obj.get("quantize"), - "source": obj.get("source"), - "storage": Storage.from_dict(obj["storage"]) if obj.get("storage") is not None else None, - } - ) - return _obj diff --git a/launch/api_client/models/update_llm_model_endpoint_v1_response.py b/launch/api_client/models/update_llm_model_endpoint_v1_response.py deleted file mode 100644 index 1fceb77a..00000000 --- a/launch/api_client/models/update_llm_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class UpdateLLMModelEndpointV1Response(BaseModel): - """ - UpdateLLMModelEndpointV1Response - """ # noqa: E501 - - endpoint_creation_task_id: StrictStr - __properties: ClassVar[List[str]] = ["endpoint_creation_task_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create 
an instance of UpdateLLMModelEndpointV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateLLMModelEndpointV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"endpoint_creation_task_id": obj.get("endpoint_creation_task_id")}) - return _obj diff --git a/launch/api_client/models/update_model_endpoint_v1_request.py b/launch/api_client/models/update_model_endpoint_v1_request.py deleted file mode 100644 index ae836431..00000000 --- a/launch/api_client/models/update_model_endpoint_v1_request.py +++ /dev/null @@ -1,170 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictInt, - StrictStr, -) -from typing_extensions import Annotated, Self - -from launch.api_client.models.callback_auth import CallbackAuth -from launch.api_client.models.cpus import Cpus -from launch.api_client.models.gpu_type import GpuType -from launch.api_client.models.memory import Memory -from launch.api_client.models.storage import Storage - - -class UpdateModelEndpointV1Request(BaseModel): - """ - UpdateModelEndpointV1Request - """ # noqa: E501 - - billing_tags: Optional[Dict[str, Any]] = None - cpus: Optional[Cpus] = None - default_callback_auth: Optional[CallbackAuth] = None - default_callback_url: Optional[Annotated[str, Field(min_length=1, strict=True, max_length=2083)]] = None - gpu_type: Optional[GpuType] = None - gpus: Optional[Annotated[int, Field(strict=True, ge=0)]] = None - high_priority: Optional[StrictBool] = None - labels: Optional[Dict[str, StrictStr]] = None - max_workers: Optional[Annotated[int, Field(strict=True, ge=0)]] = None - memory: Optional[Memory] = None - metadata: Optional[Dict[str, Any]] = None - min_workers: Optional[Annotated[int, Field(strict=True, ge=0)]] = None - model_bundle_id: Optional[StrictStr] = None - optimize_costs: Optional[StrictBool] = None - per_worker: Optional[StrictInt] = None - post_inference_hooks: Optional[List[StrictStr]] = None - prewarm: Optional[StrictBool] = None - public_inference: Optional[StrictBool] = None - storage: Optional[Storage] = None - __properties: ClassVar[List[str]] = [ - "billing_tags", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "optimize_costs", - "per_worker", - 
"post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateModelEndpointV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of cpus - if self.cpus: - _dict["cpus"] = self.cpus.to_dict() - # override the default output from pydantic by calling `to_dict()` of default_callback_auth - if self.default_callback_auth: - _dict["default_callback_auth"] = self.default_callback_auth.to_dict() - # override the default output from pydantic by calling `to_dict()` of memory - if self.memory: - _dict["memory"] = self.memory.to_dict() - # override the default output from pydantic by calling `to_dict()` of storage - if self.storage: - _dict["storage"] = self.storage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateModelEndpointV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "billing_tags": obj.get("billing_tags"), - "cpus": Cpus.from_dict(obj["cpus"]) if obj.get("cpus") is not None else None, - "default_callback_auth": CallbackAuth.from_dict(obj["default_callback_auth"]) - if obj.get("default_callback_auth") is not None - else None, - "default_callback_url": obj.get("default_callback_url"), - "gpu_type": obj.get("gpu_type"), - "gpus": obj.get("gpus"), - "high_priority": obj.get("high_priority"), - "labels": obj.get("labels"), - "max_workers": obj.get("max_workers"), - "memory": Memory.from_dict(obj["memory"]) if obj.get("memory") is not None else None, - "metadata": obj.get("metadata"), - "min_workers": obj.get("min_workers"), - "model_bundle_id": obj.get("model_bundle_id"), - "optimize_costs": obj.get("optimize_costs"), - "per_worker": obj.get("per_worker"), - "post_inference_hooks": obj.get("post_inference_hooks"), - "prewarm": obj.get("prewarm"), - "public_inference": 
obj.get("public_inference"), - "storage": Storage.from_dict(obj["storage"]) if obj.get("storage") is not None else None, - } - ) - return _obj diff --git a/launch/api_client/models/update_model_endpoint_v1_response.py b/launch/api_client/models/update_model_endpoint_v1_response.py deleted file mode 100644 index b41ff86a..00000000 --- a/launch/api_client/models/update_model_endpoint_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class UpdateModelEndpointV1Response(BaseModel): - """ - UpdateModelEndpointV1Response - """ # noqa: E501 - - endpoint_creation_task_id: StrictStr - __properties: ClassVar[List[str]] = ["endpoint_creation_task_id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateModelEndpointV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateModelEndpointV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"endpoint_creation_task_id": obj.get("endpoint_creation_task_id")}) - return _obj diff --git a/launch/api_client/models/update_trigger_v1_request.py b/launch/api_client/models/update_trigger_v1_request.py deleted file mode 100644 index 9ef107ba..00000000 --- a/launch/api_client/models/update_trigger_v1_request.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr -from typing_extensions import Self - - -class UpdateTriggerV1Request(BaseModel): - """ - UpdateTriggerV1Request - """ # noqa: E501 - - cron_schedule: Optional[StrictStr] = None - suspend: Optional[StrictBool] = None - __properties: ClassVar[List[str]] = ["cron_schedule", "suspend"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateTriggerV1Request from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateTriggerV1Request from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"cron_schedule": obj.get("cron_schedule"), "suspend": obj.get("suspend")}) - return _obj diff --git a/launch/api_client/models/update_trigger_v1_response.py b/launch/api_client/models/update_trigger_v1_response.py deleted file mode 100644 index b7fd1354..00000000 --- a/launch/api_client/models/update_trigger_v1_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool -from typing_extensions import Self - - -class UpdateTriggerV1Response(BaseModel): - """ - UpdateTriggerV1Response - """ # noqa: E501 - - success: StrictBool - __properties: ClassVar[List[str]] = ["success"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateTriggerV1Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateTriggerV1Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"success": obj.get("success")}) - return _obj diff --git a/launch/api_client/models/upload_file_response.py b/launch/api_client/models/upload_file_response.py deleted file mode 100644 index e9e0ba57..00000000 --- a/launch/api_client/models/upload_file_response.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing_extensions import Self - - -class UploadFileResponse(BaseModel): - """ - Response object for uploading a file. 
- """ # noqa: E501 - - id: StrictStr = Field(description="ID of the uploaded file.") - __properties: ClassVar[List[str]] = ["id"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UploadFileResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UploadFileResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"id": obj.get("id")}) - return _obj diff --git a/launch/api_client/models/validation_error.py b/launch/api_client/models/validation_error.py deleted file mode 100644 index 76221b8d..00000000 --- a/launch/api_client/models/validation_error.py +++ /dev/null @@ -1,104 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from launch.api_client.models.validation_error_loc_inner import ( - ValidationErrorLocInner, -) - - -class ValidationError(BaseModel): - """ - ValidationError - """ # noqa: E501 - - loc: List[ValidationErrorLocInner] - msg: StrictStr - type: StrictStr - __properties: ClassVar[List[str]] = ["loc", "msg", "type"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ValidationError from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in loc (list) - _items = [] - if self.loc: - for _item in self.loc: - if _item: - _items.append(_item.to_dict()) - _dict["loc"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ValidationError from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "loc": [ValidationErrorLocInner.from_dict(_item) for _item in obj["loc"]] - if obj.get("loc") is not None - else None, - "msg": obj.get("msg"), - "type": obj.get("type"), - } - ) - return _obj diff --git a/launch/api_client/models/validation_error_loc_inner.py b/launch/api_client/models/validation_error_loc_inner.py deleted file mode 100644 index 86bf7c93..00000000 --- a/launch/api_client/models/validation_error_loc_inner.py +++ /dev/null @@ -1,151 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from inspect import getfullargspec -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Union - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictInt, - StrictStr, - ValidationError, - field_validator, -) -from typing_extensions import Literal, Self - -VALIDATIONERRORLOCINNER_ANY_OF_SCHEMAS = ["int", "str"] - - -class ValidationErrorLocInner(BaseModel): - """ - ValidationErrorLocInner - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: int - anyof_schema_2_validator: Optional[StrictInt] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[int, str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"int", "str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`") - if kwargs: - raise ValueError("If a position argument is used, keyword arguments cannot be used.") - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = ValidationErrorLocInner.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: int - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in ValidationErrorLocInner with anyOf schemas: int, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into int - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into ValidationErrorLocInner with anyOf schemas: int, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], int, str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/launch/api_client/models/zip_artifact_flavor.py b/launch/api_client/models/zip_artifact_flavor.py deleted file mode 100644 index b5be75c7..00000000 --- a/launch/api_client/models/zip_artifact_flavor.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - - The version of the OpenAPI document: 1.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing_extensions import Self - -from launch.api_client.models.framework import Framework - - -class ZipArtifactFlavor(BaseModel): - """ - This is the entity-layer class for the Model Bundle flavor of a zip artifact. 
- """ # noqa: E501 - - app_config: Optional[Dict[str, Any]] = None - flavor: StrictStr - framework: Framework - load_model_fn_module_path: StrictStr - load_predict_fn_module_path: StrictStr - location: StrictStr - requirements: List[StrictStr] - __properties: ClassVar[List[str]] = [ - "app_config", - "flavor", - "framework", - "load_model_fn_module_path", - "load_predict_fn_module_path", - "location", - "requirements", - ] - - @field_validator("flavor") - def flavor_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["zip_artifact"]): - raise ValueError("must be one of enum values ('zip_artifact')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ZipArtifactFlavor from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of framework - if self.framework: - _dict["framework"] = self.framework.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ZipArtifactFlavor from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "app_config": obj.get("app_config"), - "flavor": obj.get("flavor"), - "framework": Framework.from_dict(obj["framework"]) if obj.get("framework") is not None else None, - "load_model_fn_module_path": obj.get("load_model_fn_module_path"), - "load_predict_fn_module_path": obj.get("load_predict_fn_module_path"), - "location": obj.get("location"), - "requirements": obj.get("requirements"), - } - ) - return _obj diff --git a/launch/api_client/paths/__init__.py b/launch/api_client/paths/__init__.py deleted file mode 100644 index 460b9fd4..00000000 --- a/launch/api_client/paths/__init__.py +++ /dev/null @@ -1,55 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.apis.path_to_api import path_to_api - -import enum - - -class PathValues(str, enum.Enum): - V1_BATCHJOBS = "/v1/batch-jobs" - V1_BATCHJOBS_BATCH_JOB_ID = "/v1/batch-jobs/{batch_job_id}" - V1_DOCKERIMAGEBATCHJOBS = "/v1/docker-image-batch-jobs" - V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID = "/v1/docker-image-batch-jobs/{batch_job_id}" - V1_ASYNCTASKS = "/v1/async-tasks" - V1_ASYNCTASKS_TASK_ID = "/v1/async-tasks/{task_id}" - V1_SYNCTASKS = "/v1/sync-tasks" - V1_STREAMINGTASKS = "/v1/streaming-tasks" - V1_MODELBUNDLES = "/v1/model-bundles" - V1_MODELBUNDLES_CLONEWITHCHANGES = 
"/v1/model-bundles/clone-with-changes" - V1_MODELBUNDLES_LATEST = "/v1/model-bundles/latest" - V1_MODELBUNDLES_MODEL_BUNDLE_ID = "/v1/model-bundles/{model_bundle_id}" - V2_MODELBUNDLES = "/v2/model-bundles" - V2_MODELBUNDLES_CLONEWITHCHANGES = "/v2/model-bundles/clone-with-changes" - V2_MODELBUNDLES_LATEST = "/v2/model-bundles/latest" - V2_MODELBUNDLES_MODEL_BUNDLE_ID = "/v2/model-bundles/{model_bundle_id}" - V1_MODELENDPOINTS = "/v1/model-endpoints" - V1_MODELENDPOINTS_MODEL_ENDPOINT_ID = "/v1/model-endpoints/{model_endpoint_id}" - V1_MODELENDPOINTS_MODEL_ENDPOINT_ID_RESTART = "/v1/model-endpoints/{model_endpoint_id}/restart" - V1_MODELENDPOINTSSCHEMA_JSON = "/v1/model-endpoints-schema.json" - V1_MODELENDPOINTSAPI = "/v1/model-endpoints-api" - V1_DOCKERIMAGEBATCHJOBBUNDLES = "/v1/docker-image-batch-job-bundles" - V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST = "/v1/docker-image-batch-job-bundles/latest" - V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID = "/v1/docker-image-batch-job-bundles/{docker_image_batch_job_bundle_id}" - V1_LLM_MODELENDPOINTS = "/v1/llm/model-endpoints" - V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME = "/v1/llm/model-endpoints/{model_endpoint_name}" - V1_LLM_COMPLETIONSSYNC = "/v1/llm/completions-sync" - V1_LLM_COMPLETIONSSTREAM = "/v1/llm/completions-stream" - V1_LLM_FINETUNES = "/v1/llm/fine-tunes" - V1_LLM_FINETUNES_FINE_TUNE_ID = "/v1/llm/fine-tunes/{fine_tune_id}" - V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL = "/v1/llm/fine-tunes/{fine_tune_id}/cancel" - V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS = "/v1/llm/fine-tunes/{fine_tune_id}/events" - V1_LLM_MODELENDPOINTS_DOWNLOAD = "/v1/llm/model-endpoints/download" - V1_LLM_BATCHCOMPLETIONS = "/v1/llm/batch-completions" - V1_FILES = "/v1/files" - V1_FILES_FILE_ID = "/v1/files/{file_id}" - V1_FILES_FILE_ID_CONTENT = "/v1/files/{file_id}/content" - V1_TRIGGERS = "/v1/triggers" - V1_TRIGGERS_TRIGGER_ID = "/v1/triggers/{trigger_id}" - V2_BATCHCOMPLETIONS = "/v2/batch-completions" - 
V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID = "/v2/batch-completions/{batch_completion_id}" - V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID_ACTIONS_CANCEL = "/v2/batch-completions/{batch_completion_id}/actions/cancel" - V2_CHAT_COMPLETIONS = "/v2/chat/completions" - V2_COMPLETIONS = "/v2/completions" - HEALTHCHECK = "/healthcheck" - HEALTHZ = "/healthz" - READYZ = "/readyz" diff --git a/launch/api_client/paths/healthcheck/__init__.py b/launch/api_client/paths/healthcheck/__init__.py deleted file mode 100644 index b0eea9c5..00000000 --- a/launch/api_client/paths/healthcheck/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.healthcheck import Api - -from launch.api_client.paths import PathValues - -path = PathValues.HEALTHCHECK \ No newline at end of file diff --git a/launch/api_client/paths/healthcheck/get.py b/launch/api_client/paths/healthcheck/get.py deleted file mode 100644 index 62989a36..00000000 --- a/launch/api_client/paths/healthcheck/get.py +++ /dev/null @@ -1,235 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions - -from . 
import path - -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _healthcheck_healthcheck_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _healthcheck_healthcheck_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _healthcheck_healthcheck_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _healthcheck_healthcheck_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Healthcheck - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class HealthcheckHealthcheckGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def healthcheck_healthcheck_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def healthcheck_healthcheck_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def healthcheck_healthcheck_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def healthcheck_healthcheck_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_healthcheck_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_healthcheck_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/healthz/__init__.py b/launch/api_client/paths/healthz/__init__.py deleted file mode 100644 index b4e79532..00000000 --- a/launch/api_client/paths/healthz/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.healthz import Api - -from launch.api_client.paths import PathValues - -path = PathValues.HEALTHZ \ No newline at end of file diff --git a/launch/api_client/paths/healthz/get.py b/launch/api_client/paths/healthz/get.py deleted file mode 100644 index 4ad812ce..00000000 --- a/launch/api_client/paths/healthz/get.py +++ /dev/null @@ -1,235 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import 
HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions - -from . import path - -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _healthcheck_healthz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _healthcheck_healthz_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _healthcheck_healthz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _healthcheck_healthz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Healthcheck - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class HealthcheckHealthzGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def healthcheck_healthz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def healthcheck_healthz_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def healthcheck_healthz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def healthcheck_healthz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_healthz_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_healthz_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/readyz/__init__.py b/launch/api_client/paths/readyz/__init__.py deleted file mode 100644 index 4b5a5af4..00000000 --- a/launch/api_client/paths/readyz/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.readyz import Api - -from launch.api_client.paths import PathValues - -path = PathValues.READYZ \ No newline at end of file diff --git a/launch/api_client/paths/readyz/get.py b/launch/api_client/paths/readyz/get.py deleted file mode 100644 index 1e946596..00000000 --- a/launch/api_client/paths/readyz/get.py +++ /dev/null @@ -1,235 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - 
-from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions - -from . import path - -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _healthcheck_readyz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _healthcheck_readyz_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _healthcheck_readyz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _healthcheck_readyz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Healthcheck - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class HealthcheckReadyzGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def healthcheck_readyz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def healthcheck_readyz_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def healthcheck_readyz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def healthcheck_readyz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_readyz_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_readyz_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_async_tasks/__init__.py b/launch/api_client/paths/v1_async_tasks/__init__.py deleted file mode 100644 index 72df600f..00000000 --- a/launch/api_client/paths/v1_async_tasks/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_async_tasks import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_ASYNCTASKS \ No newline at end of file diff --git a/launch/api_client/paths/v1_async_tasks/post.py b/launch/api_client/paths/v1_async_tasks/post.py deleted file mode 100644 index f91beb6c..00000000 --- a/launch/api_client/paths/v1_async_tasks/post.py +++ /dev/null @@ -1,419 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 
-import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_async_task_v1_response import ( - CreateAsyncTaskV1Response, -) -from launch.api_client.model.endpoint_predict_v1_request import ( - EndpointPredictV1Request, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Query params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_endpoint_id = api_client.QueryParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointIdSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = EndpointPredictV1Request - - -request_body_endpoint_predict_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateAsyncTaskV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - 
-@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Async Inference Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_endpoint_id, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_endpoint_predict_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateAsyncInferenceTaskV1AsyncTasksPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_async_inference_task_v1_async_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_async_inference_task_v1_async_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_async_tasks_task_id/__init__.py b/launch/api_client/paths/v1_async_tasks_task_id/__init__.py deleted file mode 100644 index ef09b1e6..00000000 --- a/launch/api_client/paths/v1_async_tasks_task_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames 
-# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_async_tasks_task_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_ASYNCTASKS_TASK_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_async_tasks_task_id/get.py b/launch/api_client/paths/v1_async_tasks_task_id/get.py deleted file mode 100644 index c0dec01b..00000000 --- a/launch/api_client/paths/v1_async_tasks_task_id/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_async_task_v1_response import ( - GetAsyncTaskV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Path params -TaskIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'task_id': typing.Union[TaskIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_task_id = api_client.PathParameter( - name="task_id", - style=api_client.ParameterStyle.SIMPLE, - schema=TaskIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetAsyncTaskV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool 
= False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Async Inference Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_task_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetAsyncInferenceTaskV1AsyncTasksTaskIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_async_inference_task_v1_async_tasks_task_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_async_inference_task_v1_async_tasks_task_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_async_inference_task_v1_async_tasks_task_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_async_inference_task_v1_async_tasks_task_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_async_inference_task_v1_async_tasks_task_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_async_inference_task_v1_async_tasks_task_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_batch_jobs/__init__.py b/launch/api_client/paths/v1_batch_jobs/__init__.py deleted file mode 100644 index 65e727af..00000000 --- a/launch/api_client/paths/v1_batch_jobs/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_batch_jobs import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_BATCHJOBS \ No newline at end of file diff --git a/launch/api_client/paths/v1_batch_jobs/post.py b/launch/api_client/paths/v1_batch_jobs/post.py deleted file mode 100644 index d87f8229..00000000 --- a/launch/api_client/paths/v1_batch_jobs/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_batch_job_v1_request 
import ( - CreateBatchJobV1Request, -) -from launch.api_client.model.create_batch_job_v1_response import ( - CreateBatchJobV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# body param -SchemaForRequestBodyApplicationJson = CreateBatchJobV1Request - - -request_body_create_batch_job_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateBatchJobV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = 
False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_batch_job_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - 
api_response=api_response - ) - - return api_response - - -class CreateBatchJobV1BatchJobsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_batch_job_v1_batch_jobs_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_batch_job_v1_batch_jobs_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_batch_jobs_batch_job_id/__init__.py b/launch/api_client/paths/v1_batch_jobs_batch_job_id/__init__.py deleted file mode 100644 index 0acdf21c..00000000 --- a/launch/api_client/paths/v1_batch_jobs_batch_job_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_batch_jobs_batch_job_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_BATCHJOBS_BATCH_JOB_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.py b/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.py deleted file mode 100644 index ad9a632d..00000000 --- a/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from 
launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_batch_job_v1_response import ( - GetBatchJobV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -BatchJobIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'batch_job_id': typing.Union[BatchJobIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_batch_job_id = api_client.PathParameter( - name="batch_job_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchJobIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetBatchJobV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} 
-_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_batch_job_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetBatchJobV1BatchJobsBatchJobIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_batch_job_v1_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_batch_job_v1_batch_jobs_batch_job_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_batch_job_v1_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_batch_job_v1_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.py b/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.py deleted file mode 100644 index 0dd75ff8..00000000 --- a/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.py +++ /dev/null @@ -1,418 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.update_batch_job_v1_request import ( - UpdateBatchJobV1Request, -) -from launch.api_client.model.update_batch_job_v1_response import ( - UpdateBatchJobV1Response, -) - -from . 
import path - -# Path params -BatchJobIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'batch_job_id': typing.Union[BatchJobIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_batch_job_id = api_client.PathParameter( - name="batch_job_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchJobIdSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateBatchJobV1Request - - -request_body_update_batch_job_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = UpdateBatchJobV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} 
-_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_batch_job_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_update_batch_job_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='put'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class UpdateBatchJobV1BatchJobsBatchJobIdPut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles/__init__.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles/__init__.py deleted file mode 100644 index 588a7c71..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of 
memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_docker_image_batch_job_bundles import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES \ No newline at end of file diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.py deleted file mode 100644 index 6e74c613..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.py +++ /dev/null @@ -1,347 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.list_docker_image_batch_job_bundle_v1_response import ( - ListDockerImageBatchJobBundleV1Response, -) -from launch.api_client.model.model_bundle_order_by import ModelBundleOrderBy - -from . 
import path - -# Query params - - -class BundleNameSchema( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin -): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'BundleNameSchema': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) -OrderBySchema = ModelBundleOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - 'bundle_name': typing.Union[BundleNameSchema, None, str, ], - 'order_by': typing.Union[OrderBySchema, ], - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_bundle_name = api_client.QueryParameter( - name="bundle_name", - style=api_client.ParameterStyle.FORM, - schema=BundleNameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - name="order_by", - style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ListDockerImageBatchJobBundleV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - 
-_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Docker Image Batch Job Model Bundles - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_bundle_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= 
response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class ListDockerImageBatchJobModelBundlesV1DockerImageBatchJobBundlesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.py deleted file mode 100644 index 14c33e8e..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_docker_image_batch_job_bundle_v1_request import ( - CreateDockerImageBatchJobBundleV1Request, -) -from launch.api_client.model.create_docker_image_batch_job_bundle_v1_response import ( - CreateDockerImageBatchJobBundleV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# body param -SchemaForRequestBodyApplicationJson = CreateDockerImageBatchJobBundleV1Request - - -request_body_create_docker_image_batch_job_bundle_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateDockerImageBatchJobBundleV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] 
= ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Docker Image Batch Job Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_docker_image_batch_job_bundle_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise 
exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateDockerImageBatchJobBundleV1DockerImageBatchJobBundlesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py deleted file mode 100644 index 501aeb6d..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.py deleted file mode 100644 index 5b5faa24..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # 
noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -DockerImageBatchJobBundleIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'docker_image_batch_job_bundle_id': typing.Union[DockerImageBatchJobBundleIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_docker_image_batch_job_bundle_id = api_client.PathParameter( - name="docker_image_batch_job_bundle_id", - style=api_client.ParameterStyle.SIMPLE, - schema=DockerImageBatchJobBundleIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = DockerImageBatchJobBundleV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class 
ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Docker Image Batch Job Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_docker_image_batch_job_bundle_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - 
if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class GetDockerImageBatchJobModelBundleV1DockerImageBatchJobBundlesDockerImageBatchJobBundleIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/__init__.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/__init__.py deleted file mode 100644 index 07e8fff3..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_docker_image_batch_job_bundles_latest import Api - -from launch.api_client.paths import 
PathValues - -path = PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST \ No newline at end of file diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.py deleted file mode 100644 index 44566b78..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.py +++ /dev/null @@ -1,319 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Query params -BundleNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - 'bundle_name': typing.Union[BundleNameSchema, str, ], - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_bundle_name = api_client.QueryParameter( - name="bundle_name", - style=api_client.ParameterStyle.FORM, - schema=BundleNameSchema, - required=True, - explode=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = DockerImageBatchJobBundleV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = 
frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Latest Docker Image Batch Job Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_bundle_name, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - 
raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class GetLatestDockerImageBatchJobBundleV1DockerImageBatchJobBundlesLatestGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs/__init__.py b/launch/api_client/paths/v1_docker_image_batch_jobs/__init__.py deleted file mode 100644 index d9af25cf..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_docker_image_batch_jobs import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_DOCKERIMAGEBATCHJOBS \ No newline at end of file diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs/get.py b/launch/api_client/paths/v1_docker_image_batch_jobs/get.py deleted file mode 100644 index 10a760d7..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs/get.py +++ /dev/null @@ -1,337 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client 
import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.list_docker_image_batch_jobs_v1_response import ( - ListDockerImageBatchJobsV1Response, -) - -from . import path - -# Query params - - -class TriggerIdSchema( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin -): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'TriggerIdSchema': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - 'trigger_id': typing.Union[TriggerIdSchema, None, str, ], - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_trigger_id = api_client.QueryParameter( - name="trigger_id", - style=api_client.ParameterStyle.FORM, - schema=TriggerIdSchema, - explode=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ListDockerImageBatchJobsV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: 
schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Docker Image Batch Jobs - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_trigger_id, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - 
status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class ListDockerImageBatchJobsV1DockerImageBatchJobsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs/post.py b/launch/api_client/paths/v1_docker_image_batch_jobs/post.py deleted file mode 100644 index efa8405f..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_docker_image_batch_job_v1_request import ( - CreateDockerImageBatchJobV1Request, -) -from launch.api_client.model.create_docker_image_batch_job_v1_response import ( - CreateDockerImageBatchJobV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# body param -SchemaForRequestBodyApplicationJson = CreateDockerImageBatchJobV1Request - - -request_body_create_docker_image_batch_job_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateDockerImageBatchJobV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - 
ApiResponseFor200, - ]: ... - - @typing.overload - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Docker Image Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_docker_image_batch_job_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - 
status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateDockerImageBatchJobV1DockerImageBatchJobsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/__init__.py b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/__init__.py deleted file mode 100644 index c085c391..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_docker_image_batch_jobs_batch_job_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.py b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.py deleted file mode 100644 index 0b6de78f..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 
-import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_docker_image_batch_job_v1_response import ( - GetDockerImageBatchJobV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -BatchJobIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'batch_job_id': typing.Union[BatchJobIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_batch_job_id = api_client.PathParameter( - name="batch_job_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchJobIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetDockerImageBatchJobV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - 
schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Docker Image Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_batch_job_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - 
-class GetDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.py b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.py deleted file mode 100644 index 472dc100..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.py +++ /dev/null @@ -1,418 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.update_docker_image_batch_job_v1_request import ( - UpdateDockerImageBatchJobV1Request, -) -from launch.api_client.model.update_docker_image_batch_job_v1_response import ( - UpdateDockerImageBatchJobV1Response, -) - -from . 
import path - -# Path params -BatchJobIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'batch_job_id': typing.Union[BatchJobIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_batch_job_id = api_client.PathParameter( - name="batch_job_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchJobIdSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateDockerImageBatchJobV1Request - - -request_body_update_docker_image_batch_job_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = UpdateDockerImageBatchJobV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': 
_response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Docker Image Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_batch_job_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter 
has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_update_docker_image_batch_job_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='put'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class UpdateDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdPut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_files/__init__.py b/launch/api_client/paths/v1_files/__init__.py deleted file mode 100644 index 3028744d..00000000 --- a/launch/api_client/paths/v1_files/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability 
to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_files import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_FILES \ No newline at end of file diff --git a/launch/api_client/paths/v1_files/get.py b/launch/api_client/paths/v1_files/get.py deleted file mode 100644 index 06eee942..00000000 --- a/launch/api_client/paths/v1_files/get.py +++ /dev/null @@ -1,241 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.list_files_response import ListFilesResponse - -from . 
import path - -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ListFilesResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _list_files_v1_files_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _list_files_v1_files_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _list_files_v1_files_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _list_files_v1_files_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Files - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class ListFilesV1FilesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_files_v1_files_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def list_files_v1_files_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def list_files_v1_files_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def list_files_v1_files_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_files_v1_files_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_files_v1_files_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_files/post.py b/launch/api_client/paths/v1_files/post.py deleted file mode 100644 index 85c79c7f..00000000 --- a/launch/api_client/paths/v1_files/post.py +++ /dev/null @@ -1,356 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.body_upload_file_v1_files_post import ( - BodyUploadFileV1FilesPost, -) -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.upload_file_response import UploadFileResponse - -from . 
import path - -# body param -SchemaForRequestBodyMultipartFormData = BodyUploadFileV1FilesPost - - -request_body_body = api_client.RequestBody( - content={ - 'multipart/form-data': api_client.MediaType( - schema=SchemaForRequestBodyMultipartFormData), - }, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = UploadFileResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _upload_file_v1_files_post_oapg( - self, - content_type: typing_extensions.Literal["multipart/form-data"] = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def _upload_file_v1_files_post_oapg( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _upload_file_v1_files_post_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _upload_file_v1_files_post_oapg( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _upload_file_v1_files_post_oapg( - self, - content_type: str = 'multipart/form-data', - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Upload File - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - _fields = None - _body = None - if body is not schemas.unset: - serialized_data = request_body_body.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class UploadFileV1FilesPost(BaseApi): - # this class is used by api classes 
that refer to endpoints with operationId fn names - - @typing.overload - def upload_file_v1_files_post( - self, - content_type: typing_extensions.Literal["multipart/form-data"] = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def upload_file_v1_files_post( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def upload_file_v1_files_post( - self, - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def upload_file_v1_files_post( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def upload_file_v1_files_post( - self, - content_type: str = 'multipart/form-data', - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._upload_file_v1_files_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - content_type: typing_extensions.Literal["multipart/form-data"] = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def post( - self, - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def post( - self, - content_type: str = 'multipart/form-data', - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._upload_file_v1_files_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_files_file_id/__init__.py b/launch/api_client/paths/v1_files_file_id/__init__.py deleted file mode 100644 index 7093c7cf..00000000 --- a/launch/api_client/paths/v1_files_file_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_files_file_id import Api - -from 
launch.api_client.paths import PathValues - -path = PathValues.V1_FILES_FILE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_files_file_id/delete.py b/launch/api_client/paths/v1_files_file_id/delete.py deleted file mode 100644 index d7f9ecb7..00000000 --- a/launch/api_client/paths/v1_files_file_id/delete.py +++ /dev/null @@ -1,316 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.delete_file_response import DeleteFileResponse -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Path params -FileIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'file_id': typing.Union[FileIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_file_id = api_client.PathParameter( - name="file_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FileIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = DeleteFileResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _delete_file_v1_files_file_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: 
typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _delete_file_v1_files_file_id_delete_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _delete_file_v1_files_file_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _delete_file_v1_files_file_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Delete File - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_file_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='delete'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class DeleteFileV1FilesFileIdDelete(BaseApi): - # this 
class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def delete_file_v1_files_file_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def delete_file_v1_files_file_id_delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def delete_file_v1_files_file_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def delete_file_v1_files_file_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_file_v1_files_file_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiFordelete(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_file_v1_files_file_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_files_file_id/get.py b/launch/api_client/paths/v1_files_file_id/get.py deleted file mode 100644 index 66559106..00000000 --- a/launch/api_client/paths/v1_files_file_id/get.py +++ /dev/null @@ -1,316 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_file_response import GetFileResponse -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Path params -FileIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'file_id': typing.Union[FileIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_file_id = api_client.PathParameter( - name="file_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FileIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetFileResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_file_v1_files_file_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: 
typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_file_v1_files_file_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_file_v1_files_file_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_file_v1_files_file_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get File - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_file_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class GetFileV1FilesFileIdGet(BaseApi): - # this class is used by 
api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_file_v1_files_file_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_file_v1_files_file_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_file_v1_files_file_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_file_v1_files_file_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_file_v1_files_file_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_file_v1_files_file_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_files_file_id_content/__init__.py b/launch/api_client/paths/v1_files_file_id_content/__init__.py deleted file mode 100644 index a0c46729..00000000 --- a/launch/api_client/paths/v1_files_file_id_content/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_files_file_id_content import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_FILES_FILE_ID_CONTENT \ No newline at end of file diff --git a/launch/api_client/paths/v1_files_file_id_content/get.py b/launch/api_client/paths/v1_files_file_id_content/get.py deleted file mode 100644 index 37a8642e..00000000 --- a/launch/api_client/paths/v1_files_file_id_content/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, 
exceptions -from launch.api_client.model.get_file_content_response import ( - GetFileContentResponse, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -FileIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'file_id': typing.Union[FileIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_file_id = api_client.PathParameter( - name="file_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FileIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetFileContentResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def 
_get_file_content_v1_files_file_id_content_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_file_content_v1_files_file_id_content_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_file_content_v1_files_file_id_content_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_file_content_v1_files_file_id_content_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get File Content - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_file_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetFileContentV1FilesFileIdContentGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_file_content_v1_files_file_id_content_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_file_content_v1_files_file_id_content_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_file_content_v1_files_file_id_content_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_file_content_v1_files_file_id_content_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_file_content_v1_files_file_id_content_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_file_content_v1_files_file_id_content_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_batch_completions/__init__.py b/launch/api_client/paths/v1_llm_batch_completions/__init__.py deleted file mode 100644 index 6da01f4e..00000000 --- a/launch/api_client/paths/v1_llm_batch_completions/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_batch_completions import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_BATCHCOMPLETIONS \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_batch_completions/post.py b/launch/api_client/paths/v1_llm_batch_completions/post.py deleted file mode 100644 index f3823b16..00000000 --- a/launch/api_client/paths/v1_llm_batch_completions/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import 
api_client, exceptions -from launch.api_client.model.create_batch_completions_v1_request import ( - CreateBatchCompletionsV1Request, -) -from launch.api_client.model.create_batch_completions_v1_response import ( - CreateBatchCompletionsV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# body param -SchemaForRequestBodyApplicationJson = CreateBatchCompletionsV1Request - - -request_body_create_batch_completions_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateBatchCompletionsV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_batch_completions_v1_llm_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], 
- content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _create_batch_completions_v1_llm_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_batch_completions_v1_llm_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_batch_completions_v1_llm_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_batch_completions_v1_llm_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Batch Completions - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_batch_completions_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - 
reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateBatchCompletionsV1LlmBatchCompletionsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_batch_completions_v1_llm_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_batch_completions_v1_llm_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_batch_completions_v1_llm_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def create_batch_completions_v1_llm_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def create_batch_completions_v1_llm_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_batch_completions_v1_llm_batch_completions_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_batch_completions_v1_llm_batch_completions_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_completion_sync/__init__.py b/launch/api_client/paths/v1_llm_completion_sync/__init__.py deleted file mode 100644 index 71c6b8c9..00000000 --- a/launch/api_client/paths/v1_llm_completion_sync/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_completion_sync import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_COMPLETIONSYNC diff --git a/launch/api_client/paths/v1_llm_completion_sync/post.py b/launch/api_client/paths/v1_llm_completion_sync/post.py deleted file mode 100644 index ef0407a1..00000000 --- a/launch/api_client/paths/v1_llm_completion_sync/post.py +++ /dev/null @@ -1,426 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from 
launch.api_client import api_client, exceptions -from launch.api_client.model.completion_sync_v1_request import ( - CompletionSyncV1Request, -) -from launch.api_client.model.completion_sync_v1_response import ( - CompletionSyncV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Query params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_endpoint_name = api_client.QueryParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointNameSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = CompletionSyncV1Request - - -request_body_completion_sync_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - "HTTPBasic", -] -SchemaFor200ResponseBodyApplicationJson = CompletionSyncV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, 
- ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, -} -_all_accept_content_types = ("application/json",) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... - - @typing.overload - def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
- - @typing.overload - def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... - - @typing.overload - def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
- - def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Completion Sync Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_name,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_completion_sync_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response, - ) - - return api_response - - -class CreateCompletionSyncTaskV1LlmCompletionSyncPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
- - @typing.overload - def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... - - @typing.overload - def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... - - @typing.overload - def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
- - def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_sync_task_v1_llm_completion_sync_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... - - @typing.overload - def post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
- - @typing.overload - def post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... - - @typing.overload - def post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... - - def post( - self, - body: typing.Union[ - SchemaForRequestBodyApplicationJson, - ], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_sync_task_v1_llm_completion_sync_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_completions_stream/__init__.py b/launch/api_client/paths/v1_llm_completions_stream/__init__.py deleted file mode 100644 index b7bc2957..00000000 --- a/launch/api_client/paths/v1_llm_completions_stream/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot 
of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_completions_stream import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_COMPLETIONSSTREAM \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_completions_stream/post.py b/launch/api_client/paths/v1_llm_completions_stream/post.py deleted file mode 100644 index 38643dcb..00000000 --- a/launch/api_client/paths/v1_llm_completions_stream/post.py +++ /dev/null @@ -1,419 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.completion_stream_v1_request import ( - CompletionStreamV1Request, -) -from launch.api_client.model.completion_stream_v1_response import ( - CompletionStreamV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Query params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_endpoint_name = api_client.QueryParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointNameSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = CompletionStreamV1Request - - -request_body_completion_stream_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CompletionStreamV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': 
_response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Completion Stream Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_endpoint_name, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', 
accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_completion_stream_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateCompletionStreamTaskV1LlmCompletionsStreamPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_stream_task_v1_llm_completions_stream_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_stream_task_v1_llm_completions_stream_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_completions_sync/__init__.py b/launch/api_client/paths/v1_llm_completions_sync/__init__.py deleted file mode 100644 index 2eec9261..00000000 --- a/launch/api_client/paths/v1_llm_completions_sync/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory 
and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_completions_sync import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_COMPLETIONSSYNC \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_completions_sync/post.py b/launch/api_client/paths/v1_llm_completions_sync/post.py deleted file mode 100644 index 8a287bdd..00000000 --- a/launch/api_client/paths/v1_llm_completions_sync/post.py +++ /dev/null @@ -1,419 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.completion_sync_v1_request import ( - CompletionSyncV1Request, -) -from launch.api_client.model.completion_sync_v1_response import ( - CompletionSyncV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Query params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_endpoint_name = api_client.QueryParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointNameSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = CompletionSyncV1Request - - -request_body_completion_sync_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CompletionSyncV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': 
_response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Completion Sync Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_endpoint_name, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - 
- if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_completion_sync_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateCompletionSyncTaskV1LlmCompletionsSyncPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_sync_task_v1_llm_completions_sync_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_sync_task_v1_llm_completions_sync_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_fine_tunes/__init__.py b/launch/api_client/paths/v1_llm_fine_tunes/__init__.py deleted file mode 100644 index 9c6cd840..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if 
you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_fine_tunes import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_FINETUNES \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_fine_tunes/get.py b/launch/api_client/paths/v1_llm_fine_tunes/get.py deleted file mode 100644 index ed423f7f..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes/get.py +++ /dev/null @@ -1,243 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.list_fine_tunes_response import ( - ListFineTunesResponse, -) - -from . 
import path - -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ListFineTunesResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Fine Tunes - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class ListFineTunesV1LlmFineTunesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_fine_tunes_v1_llm_fine_tunes_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def list_fine_tunes_v1_llm_fine_tunes_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def list_fine_tunes_v1_llm_fine_tunes_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def list_fine_tunes_v1_llm_fine_tunes_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_fine_tunes_v1_llm_fine_tunes_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_fine_tunes_v1_llm_fine_tunes_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_fine_tunes/post.py b/launch/api_client/paths/v1_llm_fine_tunes/post.py deleted file mode 100644 index 446835da..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_fine_tune_request import ( - CreateFineTuneRequest, -) -from launch.api_client.model.create_fine_tune_response import ( - CreateFineTuneResponse, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# body param -SchemaForRequestBodyApplicationJson = CreateFineTuneRequest - - -request_body_create_fine_tune_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateFineTuneResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Fine Tune - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_fine_tune_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - 
api_response=api_response - ) - - return api_response - - -class CreateFineTuneV1LlmFineTunesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_fine_tune_v1_llm_fine_tunes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_fine_tune_v1_llm_fine_tunes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/__init__.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/__init__.py deleted file mode 100644 index 32571f7e..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.py deleted file mode 100644 index f783879d..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.py +++ /dev/null @@ -1,316 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import 
HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_fine_tune_response import GetFineTuneResponse -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -FineTuneIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'fine_tune_id': typing.Union[FineTuneIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_fine_tune_id = api_client.PathParameter( - name="fine_tune_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FineTuneIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetFineTuneResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} 
-_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Fine Tune - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_fine_tune_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetFineTuneV1LlmFineTunesFineTuneIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py deleted file mode 100644 index 216fd5a2..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_cancel import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.py deleted file mode 100644 index f8973227..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections 
import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.cancel_fine_tune_response import ( - CancelFineTuneResponse, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -FineTuneIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'fine_tune_id': typing.Union[FineTuneIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_fine_tune_id = api_client.PathParameter( - name="fine_tune_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FineTuneIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CancelFineTuneResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': 
_response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Cancel Fine Tune - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_fine_tune_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='put'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
CancelFineTuneV1LlmFineTunesFineTuneIdCancelPut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def put( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def put( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def put( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/__init__.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/__init__.py deleted file mode 100644 index efa06c77..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_events import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.py deleted file mode 100644 index e116c368..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from 
urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_fine_tune_events_response import ( - GetFineTuneEventsResponse, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -FineTuneIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'fine_tune_id': typing.Union[FineTuneIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_fine_tune_id = api_client.PathParameter( - name="fine_tune_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FineTuneIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetFineTuneEventsResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': 
_response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Fine Tune Events - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_fine_tune_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetFineTuneEventsV1LlmFineTunesFineTuneIdEventsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/__init__.py b/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/__init__.py deleted file mode 100644 index 07ffed60..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_fine_tunes_model_endpoint_name_events import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_FINETUNES_MODEL_ENDPOINT_NAME_EVENTS diff --git a/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.py b/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.py deleted file mode 100644 index a4792598..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.py +++ /dev/null @@ -1,297 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 
-import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_fine_tune_events_response import ( - GetFineTuneEventsResponse, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_endpoint_name = api_client.PathParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointNameSchema, - required=True, -) -_auth = [ - "HTTPBasic", -] -SchemaFor200ResponseBodyApplicationJson = GetFineTuneEventsResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) 
-_status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, -} -_all_accept_content_types = ("application/json",) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... - - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... - - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
- - def _get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Fine Tune Events - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_model_endpoint_name,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - - -class 
GetFineTuneEventsV1LlmFineTunesModelEndpointNameEventsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... - - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... - - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
- - def get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_model_endpoints/__init__.py b/launch/api_client/paths/v1_llm_model_endpoints/__init__.py deleted file mode 100644 index c84b3640..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_model_endpoints import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_MODELENDPOINTS \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_model_endpoints/get.py b/launch/api_client/paths/v1_llm_model_endpoints/get.py deleted file mode 100644 index cf89e8bd..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints/get.py +++ /dev/null @@ -1,349 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client 
import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.list_llm_model_endpoints_v1_response import ( - ListLLMModelEndpointsV1Response, -) -from launch.api_client.model.model_endpoint_order_by import ( - ModelEndpointOrderBy, -) - -from . import path - -# Query params - - -class NameSchema( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin -): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'NameSchema': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) -OrderBySchema = ModelEndpointOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - 'name': typing.Union[NameSchema, None, str, ], - 'order_by': typing.Union[OrderBySchema, ], - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_name = api_client.QueryParameter( - name="name", - style=api_client.ParameterStyle.FORM, - schema=NameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - name="order_by", - style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ListLLMModelEndpointsV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) 
-SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Model Endpoints - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - 
status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class ListModelEndpointsV1LlmModelEndpointsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_model_endpoints_v1_llm_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def list_model_endpoints_v1_llm_model_endpoints_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def list_model_endpoints_v1_llm_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def list_model_endpoints_v1_llm_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_endpoints_v1_llm_model_endpoints_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_endpoints_v1_llm_model_endpoints_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_model_endpoints/post.py b/launch/api_client/paths/v1_llm_model_endpoints/post.py deleted file mode 100644 index 2d1464b1..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_llm_model_endpoint_v1_request import ( - CreateLLMModelEndpointV1Request, -) -from launch.api_client.model.create_llm_model_endpoint_v1_response import ( - CreateLLMModelEndpointV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# body param -SchemaForRequestBodyApplicationJson = CreateLLMModelEndpointV1Request - - -request_body_create_llm_model_endpoint_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateLLMModelEndpointV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: 
... - - @typing.overload - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_llm_model_endpoint_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - 
reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateModelEndpointV1LlmModelEndpointsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_model_endpoint_v1_llm_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_model_endpoint_v1_llm_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_model_endpoint_v1_llm_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def create_model_endpoint_v1_llm_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def create_model_endpoint_v1_llm_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_endpoint_v1_llm_model_endpoints_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_endpoint_v1_llm_model_endpoints_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_model_endpoints_download/__init__.py b/launch/api_client/paths/v1_llm_model_endpoints_download/__init__.py deleted file mode 100644 index a6b0f34f..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints_download/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_model_endpoints_download import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_MODELENDPOINTS_DOWNLOAD \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_model_endpoints_download/post.py b/launch/api_client/paths/v1_llm_model_endpoints_download/post.py deleted file mode 100644 index 981af61b..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints_download/post.py +++ /dev/null @@ -1,359 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from 
urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.model_download_request import ModelDownloadRequest -from launch.api_client.model.model_download_response import ( - ModelDownloadResponse, -) - -from . import path - -# body param -SchemaForRequestBodyApplicationJson = ModelDownloadRequest - - -request_body_model_download_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ModelDownloadResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: 
typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Download Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_model_download_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - 
reason=response.reason, - api_response=api_response - ) - - return api_response - - -class DownloadModelEndpointV1LlmModelEndpointsDownloadPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/__init__.py b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/__init__.py deleted file mode 100644 index c5a41d53..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.py b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.py deleted file mode 100644 index 2638eb9f..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 
- -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.delete_llm_endpoint_response import ( - DeleteLLMEndpointResponse, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_endpoint_name = api_client.PathParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointNameSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = DeleteLLMEndpointResponse - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 
'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Delete Llm Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_model_endpoint_name, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='delete'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return 
api_response - - -class DeleteLlmModelEndpointV1LlmModelEndpointsModelEndpointNameDelete(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiFordelete(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.py b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.py deleted file mode 100644 index 690338c6..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_llm_model_endpoint_v1_response import ( - GetLLMModelEndpointV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Path params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_endpoint_name = api_client.PathParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointNameSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetLLMModelEndpointV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - self, - path_params: RequestPathParams = 
frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_model_endpoint_name, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetModelEndpointV1LlmModelEndpointsModelEndpointNameGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.py b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.py deleted file mode 100644 index f7ca8bf2..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.py +++ /dev/null @@ -1,418 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.update_llm_model_endpoint_v1_request import ( - UpdateLLMModelEndpointV1Request, -) -from launch.api_client.model.update_llm_model_endpoint_v1_response import ( - UpdateLLMModelEndpointV1Response, -) - -from . 
import path - -# Path params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_endpoint_name = api_client.PathParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointNameSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateLLMModelEndpointV1Request - - -request_body_update_llm_model_endpoint_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = UpdateLLMModelEndpointV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': 
_response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_model_endpoint_name, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an 
invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_update_llm_model_endpoint_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='put'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class UpdateModelEndpointV1LlmModelEndpointsModelEndpointNamePut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_bundles/__init__.py b/launch/api_client/paths/v1_model_bundles/__init__.py deleted file mode 100644 index 02168487..00000000 --- a/launch/api_client/paths/v1_model_bundles/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if 
you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_model_bundles import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_MODELBUNDLES \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_bundles/get.py b/launch/api_client/paths/v1_model_bundles/get.py deleted file mode 100644 index a9fd9681..00000000 --- a/launch/api_client/paths/v1_model_bundles/get.py +++ /dev/null @@ -1,347 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.list_model_bundles_v1_response import ( - ListModelBundlesV1Response, -) -from launch.api_client.model.model_bundle_order_by import ModelBundleOrderBy - -from . 
import path - -# Query params - - -class ModelNameSchema( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin -): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ModelNameSchema': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) -OrderBySchema = ModelBundleOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - 'model_name': typing.Union[ModelNameSchema, None, str, ], - 'order_by': typing.Union[OrderBySchema, ], - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_name = api_client.QueryParameter( - name="model_name", - style=api_client.ParameterStyle.FORM, - schema=ModelNameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - name="order_by", - style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ListModelBundlesV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = 
api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _list_model_bundles_v1_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _list_model_bundles_v1_model_bundles_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _list_model_bundles_v1_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _list_model_bundles_v1_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Model Bundles - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - 
status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class ListModelBundlesV1ModelBundlesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_model_bundles_v1_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def list_model_bundles_v1_model_bundles_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def list_model_bundles_v1_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def list_model_bundles_v1_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_bundles_v1_model_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_bundles_v1_model_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_bundles/post.py b/launch/api_client/paths/v1_model_bundles/post.py deleted file mode 100644 index 6c9e4941..00000000 --- a/launch/api_client/paths/v1_model_bundles/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_model_bundle_v1_request import ( - CreateModelBundleV1Request, -) -from launch.api_client.model.create_model_bundle_v1_response import ( - CreateModelBundleV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# body param -SchemaForRequestBodyApplicationJson = CreateModelBundleV1Request - - -request_body_create_model_bundle_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_model_bundle_v1_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def _create_model_bundle_v1_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_model_bundle_v1_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_model_bundle_v1_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_model_bundle_v1_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_model_bundle_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - 
api_response=api_response - ) - - return api_response - - -class CreateModelBundleV1ModelBundlesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_model_bundle_v1_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_model_bundle_v1_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_model_bundle_v1_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def create_model_bundle_v1_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def create_model_bundle_v1_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_bundle_v1_model_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_bundle_v1_model_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_bundles_clone_with_changes/__init__.py b/launch/api_client/paths/v1_model_bundles_clone_with_changes/__init__.py deleted file mode 100644 index b5c960ff..00000000 --- a/launch/api_client/paths/v1_model_bundles_clone_with_changes/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_model_bundles_clone_with_changes import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_MODELBUNDLES_CLONEWITHCHANGES \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.py b/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.py deleted file mode 100644 index 5a2dec3e..00000000 --- a/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import 
urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.clone_model_bundle_v1_request import ( - CloneModelBundleV1Request, -) -from launch.api_client.model.create_model_bundle_v1_response import ( - CreateModelBundleV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# body param -SchemaForRequestBodyApplicationJson = CloneModelBundleV1Request - - -request_body_clone_model_bundle_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def 
_clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Clone Model Bundle With Changes - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_clone_model_bundle_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - 
status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CloneModelBundleWithChangesV1ModelBundlesCloneWithChangesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_bundles_latest/__init__.py b/launch/api_client/paths/v1_model_bundles_latest/__init__.py deleted file mode 100644 index e8834ca7..00000000 --- a/launch/api_client/paths/v1_model_bundles_latest/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_model_bundles_latest import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_MODELBUNDLES_LATEST \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_bundles_latest/get.py b/launch/api_client/paths/v1_model_bundles_latest/get.py deleted file mode 100644 index 72da3ddf..00000000 --- a/launch/api_client/paths/v1_model_bundles_latest/get.py +++ /dev/null @@ -1,319 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from 
launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.model_bundle_v1_response import ( - ModelBundleV1Response, -) - -from . import path - -# Query params -ModelNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - 'model_name': typing.Union[ModelNameSchema, str, ], - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_name = api_client.QueryParameter( - name="model_name", - style=api_client.ParameterStyle.FORM, - schema=ModelNameSchema, - required=True, - explode=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ModelBundleV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} 
-_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Latest Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_name, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - 
status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class GetLatestModelBundleV1ModelBundlesLatestGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_latest_model_bundle_v1_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_latest_model_bundle_v1_model_bundles_latest_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_latest_model_bundle_v1_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_latest_model_bundle_v1_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_bundles_model_bundle_id/__init__.py b/launch/api_client/paths/v1_model_bundles_model_bundle_id/__init__.py deleted file mode 100644 index aeafa82e..00000000 --- a/launch/api_client/paths/v1_model_bundles_model_bundle_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_model_bundles_model_bundle_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_MODELBUNDLES_MODEL_BUNDLE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.py b/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.py deleted file mode 100644 index 551e1342..00000000 --- a/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from 
launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.model_bundle_v1_response import ( - ModelBundleV1Response, -) - -from . import path - -# Path params -ModelBundleIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'model_bundle_id': typing.Union[ModelBundleIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_bundle_id = api_client.PathParameter( - name="model_bundle_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelBundleIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ModelBundleV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} 
-_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_model_bundle_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetModelBundleV1ModelBundlesModelBundleIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_bundle_v1_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_model_bundle_v1_model_bundles_model_bundle_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_model_bundle_v1_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_model_bundle_v1_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_endpoints/__init__.py b/launch/api_client/paths/v1_model_endpoints/__init__.py deleted file mode 100644 index 63a2873d..00000000 --- a/launch/api_client/paths/v1_model_endpoints/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_model_endpoints import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_MODELENDPOINTS \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints/get.py b/launch/api_client/paths/v1_model_endpoints/get.py deleted file mode 100644 index 6b93f989..00000000 --- a/launch/api_client/paths/v1_model_endpoints/get.py +++ /dev/null @@ -1,349 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from 
launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.list_model_endpoints_v1_response import ( - ListModelEndpointsV1Response, -) -from launch.api_client.model.model_endpoint_order_by import ( - ModelEndpointOrderBy, -) - -from . import path - -# Query params - - -class NameSchema( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin -): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'NameSchema': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) -OrderBySchema = ModelEndpointOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - 'name': typing.Union[NameSchema, None, str, ], - 'order_by': typing.Union[OrderBySchema, ], - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_name = api_client.QueryParameter( - name="name", - style=api_client.ParameterStyle.FORM, - schema=NameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - name="order_by", - style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ListModelEndpointsV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - 
-@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _list_model_endpoints_v1_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _list_model_endpoints_v1_model_endpoints_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _list_model_endpoints_v1_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _list_model_endpoints_v1_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Model Endpoints - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - 
status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class ListModelEndpointsV1ModelEndpointsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_model_endpoints_v1_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def list_model_endpoints_v1_model_endpoints_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def list_model_endpoints_v1_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def list_model_endpoints_v1_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_endpoints_v1_model_endpoints_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_endpoints_v1_model_endpoints_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_endpoints/post.py b/launch/api_client/paths/v1_model_endpoints/post.py deleted file mode 100644 index 65ce528a..00000000 --- a/launch/api_client/paths/v1_model_endpoints/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_model_endpoint_v1_request import ( - CreateModelEndpointV1Request, -) -from launch.api_client.model.create_model_endpoint_v1_response import ( - CreateModelEndpointV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# body param -SchemaForRequestBodyApplicationJson = CreateModelEndpointV1Request - - -request_body_create_model_endpoint_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateModelEndpointV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_model_endpoint_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - 
reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateModelEndpointV1ModelEndpointsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_endpoint_v1_model_endpoints_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_endpoint_v1_model_endpoints_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_endpoints_api/__init__.py b/launch/api_client/paths/v1_model_endpoints_api/__init__.py deleted file mode 100644 index 69585e08..00000000 --- a/launch/api_client/paths/v1_model_endpoints_api/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_model_endpoints_api import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_MODELENDPOINTSAPI \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints_api/get.py b/launch/api_client/paths/v1_model_endpoints_api/get.py deleted file mode 100644 index 3b082c58..00000000 --- a/launch/api_client/paths/v1_model_endpoints_api/get.py +++ /dev/null @@ -1,240 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # 
noqa: F401 -from launch.api_client import api_client, exceptions - -from . import path - -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Endpoints Api - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class GetModelEndpointsApiV1ModelEndpointsApiGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_endpoints_api_v1_model_endpoints_api_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def get_model_endpoints_api_v1_model_endpoints_api_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_model_endpoints_api_v1_model_endpoints_api_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get_model_endpoints_api_v1_model_endpoints_api_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/__init__.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/__init__.py deleted file mode 100644 index e382b4ff..00000000 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_model_endpoints_model_endpoint_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.py deleted file mode 100644 index eb4e8a7e..00000000 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 
-import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.delete_model_endpoint_v1_response import ( - DeleteModelEndpointV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_endpoint_id = api_client.PathParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = DeleteModelEndpointV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: 
schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Delete Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_model_endpoint_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='delete'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
DeleteModelEndpointV1ModelEndpointsModelEndpointIdDelete(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiFordelete(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.py deleted file mode 100644 index 382f63f2..00000000 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Path params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_endpoint_id = api_client.PathParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetModelEndpointV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - 
accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_model_endpoint_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetModelEndpointV1ModelEndpointsModelEndpointIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.py deleted file mode 100644 index c5f4f94b..00000000 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.py +++ /dev/null @@ -1,418 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.update_model_endpoint_v1_request import ( - UpdateModelEndpointV1Request, -) -from launch.api_client.model.update_model_endpoint_v1_response import ( - UpdateModelEndpointV1Response, -) - -from . 
import path - -# Path params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_endpoint_id = api_client.PathParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointIdSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateModelEndpointV1Request - - -request_body_update_model_endpoint_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = UpdateModelEndpointV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': 
_response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_model_endpoint_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: 
unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_update_model_endpoint_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='put'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class UpdateModelEndpointV1ModelEndpointsModelEndpointIdPut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/__init__.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/__init__.py deleted file mode 100644 index e9519dcb..00000000 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all 
endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_model_endpoints_model_endpoint_id_restart import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID_RESTART \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/post.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/post.py deleted file mode 100644 index 21362c71..00000000 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/post.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.restart_model_endpoint_v1_response import ( - RestartModelEndpointV1Response, -) - -from . 
import path - -# Path params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_endpoint_id = api_client.PathParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = RestartModelEndpointV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), 
- accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Restart Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_model_endpoint_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - 
-class RestartModelEndpointV1ModelEndpointsModelEndpointIdRestartPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_model_endpoints_schema_json/__init__.py b/launch/api_client/paths/v1_model_endpoints_schema_json/__init__.py deleted file mode 100644 index bbefa10f..00000000 --- a/launch/api_client/paths/v1_model_endpoints_schema_json/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_model_endpoints_schema_json import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_MODELENDPOINTSSCHEMA_JSON \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints_schema_json/get.py b/launch/api_client/paths/v1_model_endpoints_schema_json/get.py deleted file mode 100644 index a12646fa..00000000 --- a/launch/api_client/paths/v1_model_endpoints_schema_json/get.py +++ /dev/null @@ -1,240 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from 
launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions - -from . import path - -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Endpoints Schema - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class GetModelEndpointsSchemaV1ModelEndpointsSchemaJsonGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - 
ApiResponseFor200, - ]: ... - - @typing.overload - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_streaming_tasks/__init__.py b/launch/api_client/paths/v1_streaming_tasks/__init__.py deleted file mode 100644 index f5b3efaa..00000000 --- a/launch/api_client/paths/v1_streaming_tasks/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_streaming_tasks import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_STREAMINGTASKS \ No newline at end of file diff --git a/launch/api_client/paths/v1_streaming_tasks/post.py b/launch/api_client/paths/v1_streaming_tasks/post.py deleted file mode 100644 index 366fd7d0..00000000 --- a/launch/api_client/paths/v1_streaming_tasks/post.py +++ /dev/null @@ -1,416 +0,0 @@ -# coding: 
utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.sync_endpoint_predict_v1_request import ( - SyncEndpointPredictV1Request, -) - -from . import path - -# Query params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_endpoint_id = api_client.QueryParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointIdSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = SyncEndpointPredictV1Request - - -request_body_sync_endpoint_predict_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - 
headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Streaming Inference Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_endpoint_id, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_sync_endpoint_predict_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateStreamingInferenceTaskV1StreamingTasksPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_streaming_inference_task_v1_streaming_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_streaming_inference_task_v1_streaming_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_sync_tasks/__init__.py b/launch/api_client/paths/v1_sync_tasks/__init__.py deleted file mode 100644 index 0ad403fe..00000000 --- a/launch/api_client/paths/v1_sync_tasks/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the 
ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_sync_tasks import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_SYNCTASKS \ No newline at end of file diff --git a/launch/api_client/paths/v1_sync_tasks/post.py b/launch/api_client/paths/v1_sync_tasks/post.py deleted file mode 100644 index c2cda3fc..00000000 --- a/launch/api_client/paths/v1_sync_tasks/post.py +++ /dev/null @@ -1,419 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.sync_endpoint_predict_v1_request import ( - SyncEndpointPredictV1Request, -) -from launch.api_client.model.sync_endpoint_predict_v1_response import ( - SyncEndpointPredictV1Response, -) - -from . 
import path - -# Query params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_endpoint_id = api_client.QueryParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointIdSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = SyncEndpointPredictV1Request - - -request_body_sync_endpoint_predict_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = SyncEndpointPredictV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': 
_response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Sync Inference Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_endpoint_id, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is 
schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_sync_endpoint_predict_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CreateSyncInferenceTaskV1SyncTasksPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_sync_inference_task_v1_sync_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_sync_inference_task_v1_sync_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_triggers/__init__.py b/launch/api_client/paths/v1_triggers/__init__.py deleted file mode 100644 index 85662b08..00000000 --- a/launch/api_client/paths/v1_triggers/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to 
import all endpoints from this module, import them with -# from launch.api_client.paths.v1_triggers import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_TRIGGERS \ No newline at end of file diff --git a/launch/api_client/paths/v1_triggers/get.py b/launch/api_client/paths/v1_triggers/get.py deleted file mode 100644 index e57b9ff3..00000000 --- a/launch/api_client/paths/v1_triggers/get.py +++ /dev/null @@ -1,243 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.list_triggers_v1_response import ( - ListTriggersV1Response, -) - -from . 
import path - -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ListTriggersV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _list_triggers_v1_triggers_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _list_triggers_v1_triggers_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _list_triggers_v1_triggers_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _list_triggers_v1_triggers_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Triggers - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class ListTriggersV1TriggersGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_triggers_v1_triggers_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def list_triggers_v1_triggers_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def list_triggers_v1_triggers_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def list_triggers_v1_triggers_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_triggers_v1_triggers_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_triggers_v1_triggers_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_triggers/post.py b/launch/api_client/paths/v1_triggers/post.py deleted file mode 100644 index 25de106c..00000000 --- a/launch/api_client/paths/v1_triggers/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_trigger_v1_request import ( - CreateTriggerV1Request, -) -from launch.api_client.model.create_trigger_v1_response import ( - CreateTriggerV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# body param -SchemaForRequestBodyApplicationJson = CreateTriggerV1Request - - -request_body_create_trigger_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateTriggerV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Trigger - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_trigger_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - 
api_response=api_response - ) - - return api_response - - -class CreateTriggerV1TriggersPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_trigger_v1_triggers_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_trigger_v1_triggers_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_triggers_trigger_id/__init__.py b/launch/api_client/paths/v1_triggers_trigger_id/__init__.py deleted file mode 100644 index fbafad54..00000000 --- a/launch/api_client/paths/v1_triggers_trigger_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v1_triggers_trigger_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V1_TRIGGERS_TRIGGER_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_triggers_trigger_id/delete.py b/launch/api_client/paths/v1_triggers_trigger_id/delete.py deleted file mode 100644 index 14680356..00000000 --- a/launch/api_client/paths/v1_triggers_trigger_id/delete.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import 
decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.delete_trigger_v1_response import ( - DeleteTriggerV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -TriggerIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'trigger_id': typing.Union[TriggerIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_trigger_id = api_client.PathParameter( - name="trigger_id", - style=api_client.ParameterStyle.SIMPLE, - schema=TriggerIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = DeleteTriggerV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - 
SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _delete_trigger_v1_triggers_trigger_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _delete_trigger_v1_triggers_trigger_id_delete_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _delete_trigger_v1_triggers_trigger_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _delete_trigger_v1_triggers_trigger_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Delete Trigger - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_trigger_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='delete'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
DeleteTriggerV1TriggersTriggerIdDelete(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def delete_trigger_v1_triggers_trigger_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def delete_trigger_v1_triggers_trigger_id_delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def delete_trigger_v1_triggers_trigger_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def delete_trigger_v1_triggers_trigger_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_trigger_v1_triggers_trigger_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiFordelete(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_trigger_v1_triggers_trigger_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_triggers_trigger_id/get.py b/launch/api_client/paths/v1_triggers_trigger_id/get.py deleted file mode 100644 index e99f9008..00000000 --- a/launch/api_client/paths/v1_triggers_trigger_id/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_trigger_v1_response import ( - GetTriggerV1Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Path params -TriggerIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'trigger_id': typing.Union[TriggerIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_trigger_id = api_client.PathParameter( - name="trigger_id", - style=api_client.ParameterStyle.SIMPLE, - schema=TriggerIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetTriggerV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_trigger_v1_triggers_trigger_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: 
bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_trigger_v1_triggers_trigger_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_trigger_v1_triggers_trigger_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_trigger_v1_triggers_trigger_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Trigger - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_trigger_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class GetTriggerV1TriggersTriggerIdGet(BaseApi): - # 
this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_trigger_v1_triggers_trigger_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_trigger_v1_triggers_trigger_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_trigger_v1_triggers_trigger_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_trigger_v1_triggers_trigger_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_trigger_v1_triggers_trigger_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_trigger_v1_triggers_trigger_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v1_triggers_trigger_id/put.py b/launch/api_client/paths/v1_triggers_trigger_id/put.py deleted file mode 100644 index 80d7b839..00000000 --- a/launch/api_client/paths/v1_triggers_trigger_id/put.py +++ /dev/null @@ -1,418 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.update_trigger_v1_request import ( - UpdateTriggerV1Request, -) -from launch.api_client.model.update_trigger_v1_response import ( - UpdateTriggerV1Response, -) - -from . 
import path - -# Path params -TriggerIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'trigger_id': typing.Union[TriggerIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_trigger_id = api_client.PathParameter( - name="trigger_id", - style=api_client.ParameterStyle.SIMPLE, - schema=TriggerIdSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateTriggerV1Request - - -request_body_update_trigger_v1_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = UpdateTriggerV1Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 
'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Trigger - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_trigger_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_update_trigger_v1_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='put'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class UpdateTriggerV1TriggersTriggerIdPut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_trigger_v1_triggers_trigger_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_trigger_v1_triggers_trigger_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_batch_completions/__init__.py b/launch/api_client/paths/v2_batch_completions/__init__.py deleted file mode 100644 index 3d318666..00000000 --- a/launch/api_client/paths/v2_batch_completions/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the 
ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v2_batch_completions import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V2_BATCHCOMPLETIONS \ No newline at end of file diff --git a/launch/api_client/paths/v2_batch_completions/post.py b/launch/api_client/paths/v2_batch_completions/post.py deleted file mode 100644 index 3816de3d..00000000 --- a/launch/api_client/paths/v2_batch_completions/post.py +++ /dev/null @@ -1,359 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.batch_completions_job import BatchCompletionsJob -from launch.api_client.model.create_batch_completions_v2_request import ( - CreateBatchCompletionsV2Request, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# body param -SchemaForRequestBodyApplicationJson = CreateBatchCompletionsV2Request - - -request_body_create_batch_completions_v2_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = BatchCompletionsJob - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _batch_completions_v2_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def _batch_completions_v2_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _batch_completions_v2_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _batch_completions_v2_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _batch_completions_v2_batch_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Batch Completions - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_batch_completions_v2_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - 
reason=response.reason, - api_response=api_response - ) - - return api_response - - -class BatchCompletionsV2BatchCompletionsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def batch_completions_v2_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def batch_completions_v2_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def batch_completions_v2_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def batch_completions_v2_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def batch_completions_v2_batch_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._batch_completions_v2_batch_completions_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._batch_completions_v2_batch_completions_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_batch_completions_batch_completion_id/__init__.py b/launch/api_client/paths/v2_batch_completions_batch_completion_id/__init__.py deleted file mode 100644 index 58a7431f..00000000 --- a/launch/api_client/paths/v2_batch_completions_batch_completion_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v2_batch_completions_batch_completion_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID \ No newline at end of file diff --git a/launch/api_client/paths/v2_batch_completions_batch_completion_id/get.py b/launch/api_client/paths/v2_batch_completions_batch_completion_id/get.py deleted file mode 100644 index 20cc744e..00000000 --- a/launch/api_client/paths/v2_batch_completions_batch_completion_id/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import 
typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.get_batch_completion_v2_response import ( - GetBatchCompletionV2Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# Path params -BatchCompletionIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'batch_completion_id': typing.Union[BatchCompletionIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_batch_completion_id = api_client.PathParameter( - name="batch_completion_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchCompletionIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = GetBatchCompletionV2Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - 
schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Batch Completion - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_batch_completion_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetBatchCompletionV2BatchCompletionsBatchCompletionIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_batch_completion_v2_batch_completions_batch_completion_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_batch_completion_v2_batch_completions_batch_completion_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_batch_completion_v2_batch_completions_batch_completion_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_batch_completion_v2_batch_completions_batch_completion_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_batch_completions_batch_completion_id/post.py b/launch/api_client/paths/v2_batch_completions_batch_completion_id/post.py deleted file mode 100644 index 9617b9e3..00000000 --- a/launch/api_client/paths/v2_batch_completions_batch_completion_id/post.py +++ /dev/null @@ -1,418 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.update_batch_completions_v2_request import ( - UpdateBatchCompletionsV2Request, -) -from launch.api_client.model.update_batch_completions_v2_response import ( - UpdateBatchCompletionsV2Response, -) - -from . 
import path - -# Path params -BatchCompletionIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'batch_completion_id': typing.Union[BatchCompletionIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_batch_completion_id = api_client.PathParameter( - name="batch_completion_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchCompletionIdSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateBatchCompletionsV2Request - - -request_body_update_batch_completions_v2_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = UpdateBatchCompletionsV2Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': 
_response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Batch Completion - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_batch_completion_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an 
invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_update_batch_completions_v2_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class UpdateBatchCompletionV2BatchCompletionsBatchCompletionIdPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_batch_completion_v2_batch_completions_batch_completion_id_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def update_batch_completion_v2_batch_completions_batch_completion_id_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def update_batch_completion_v2_batch_completions_batch_completion_id_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def update_batch_completion_v2_batch_completions_batch_completion_id_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def update_batch_completion_v2_batch_completions_batch_completion_id_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/__init__.py b/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/__init__.py deleted file mode 100644 index f5d04293..00000000 --- a/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/__init__.py +++ /dev/null @@ 
-1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v2_batch_completions_batch_completion_id_actions_cancel import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID_ACTIONS_CANCEL \ No newline at end of file diff --git a/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/post.py b/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/post.py deleted file mode 100644 index f875a254..00000000 --- a/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/post.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.cancel_batch_completions_v2_response import ( - CancelBatchCompletionsV2Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# Path params -BatchCompletionIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'batch_completion_id': typing.Union[BatchCompletionIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_batch_completion_id = api_client.PathParameter( - name="batch_completion_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchCompletionIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CancelBatchCompletionsV2Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( - self, - path_params: RequestPathParams = 
frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Cancel Batch Completion - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_batch_completion_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return 
api_response - - -class CancelBatchCompletionV2BatchCompletionsBatchCompletionIdActionsCancelPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_chat_completions/__init__.py b/launch/api_client/paths/v2_chat_completions/__init__.py deleted file mode 100644 index 1d410511..00000000 --- a/launch/api_client/paths/v2_chat_completions/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v2_chat_completions import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V2_CHAT_COMPLETIONS \ No newline at end of file diff --git a/launch/api_client/paths/v2_chat_completions/post.py b/launch/api_client/paths/v2_chat_completions/post.py deleted file mode 100644 index 7b4f0b21..00000000 --- a/launch/api_client/paths/v2_chat_completions/post.py +++ /dev/null @@ -1,404 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client 
import api_client, exceptions -from launch.api_client.model.chat_completion_v2_request import ( - ChatCompletionV2Request, -) -from launch.api_client.model.chat_completion_v2_stream_error_chunk import ( - ChatCompletionV2StreamErrorChunk, -) -from launch.api_client.model.create_chat_completion_response import ( - CreateChatCompletionResponse, -) -from launch.api_client.model.create_chat_completion_stream_response import ( - CreateChatCompletionStreamResponse, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# body param -SchemaForRequestBodyApplicationJson = ChatCompletionV2Request - - -request_body_chat_completion_v2_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] - - -class SchemaFor200ResponseBodyApplicationJson( - schemas.ComposedSchema, -): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CreateChatCompletionResponse, - CreateChatCompletionStreamResponse, - ChatCompletionV2StreamErrorChunk, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'SchemaFor200ResponseBodyApplicationJson': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - 
@typing.overload - def _chat_completion_v2_chat_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _chat_completion_v2_chat_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _chat_completion_v2_chat_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _chat_completion_v2_chat_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _chat_completion_v2_chat_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Chat Completion - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_chat_completion_v2_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - 
api_response=api_response - ) - - return api_response - - -class ChatCompletionV2ChatCompletionsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def chat_completion_v2_chat_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def chat_completion_v2_chat_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def chat_completion_v2_chat_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def chat_completion_v2_chat_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def chat_completion_v2_chat_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._chat_completion_v2_chat_completions_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._chat_completion_v2_chat_completions_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_completions/__init__.py b/launch/api_client/paths/v2_completions/__init__.py deleted file mode 100644 index 9b8209ec..00000000 --- a/launch/api_client/paths/v2_completions/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v2_completions import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V2_COMPLETIONS \ No newline at end of file diff --git a/launch/api_client/paths/v2_completions/post.py b/launch/api_client/paths/v2_completions/post.py deleted file mode 100644 index 1aa541d3..00000000 --- a/launch/api_client/paths/v2_completions/post.py +++ /dev/null @@ -1,398 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions 
-from launch.api_client.model.completion_v2_request import CompletionV2Request -from launch.api_client.model.completion_v2_stream_error_chunk import ( - CompletionV2StreamErrorChunk, -) -from launch.api_client.model.create_completion_response import ( - CreateCompletionResponse, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# body param -SchemaForRequestBodyApplicationJson = CompletionV2Request - - -request_body_completion_v2_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] - - -class SchemaFor200ResponseBodyApplicationJson( - schemas.ComposedSchema, -): - - - class MetaOapg: - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CreateCompletionResponse, - CompletionV2StreamErrorChunk, - ] - - - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], - ) -> 'SchemaFor200ResponseBodyApplicationJson': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def 
_completion_v2_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _completion_v2_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _completion_v2_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _completion_v2_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _completion_v2_completions_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Completion - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_completion_v2_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) 
- - return api_response - - -class CompletionV2CompletionsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def completion_v2_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def completion_v2_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def completion_v2_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def completion_v2_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def completion_v2_completions_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._completion_v2_completions_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._completion_v2_completions_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_model_bundles/__init__.py b/launch/api_client/paths/v2_model_bundles/__init__.py deleted file mode 100644 index a34ff03c..00000000 --- a/launch/api_client/paths/v2_model_bundles/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v2_model_bundles import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V2_MODELBUNDLES \ No newline at end of file diff --git a/launch/api_client/paths/v2_model_bundles/get.py b/launch/api_client/paths/v2_model_bundles/get.py deleted file mode 100644 index 61b55b48..00000000 --- a/launch/api_client/paths/v2_model_bundles/get.py +++ /dev/null @@ -1,347 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import 
io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.list_model_bundles_v2_response import ( - ListModelBundlesV2Response, -) -from launch.api_client.model.model_bundle_order_by import ModelBundleOrderBy - -from . import path - -# Query params - - -class ModelNameSchema( - schemas.StrBase, - schemas.NoneBase, - schemas.Schema, - schemas.NoneStrMixin -): - - - def __new__( - cls, - *_args: typing.Union[None, str, ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 'ModelNameSchema': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) -OrderBySchema = ModelBundleOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - 'model_name': typing.Union[ModelNameSchema, None, str, ], - 'order_by': typing.Union[OrderBySchema, ], - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_name = api_client.QueryParameter( - name="model_name", - style=api_client.ParameterStyle.FORM, - schema=ModelNameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - name="order_by", - style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ListModelBundlesV2Response - - -@dataclass -class 
ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _list_model_bundles_v2_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _list_model_bundles_v2_model_bundles_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def _list_model_bundles_v2_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def _list_model_bundles_v2_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Model Bundles - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - 
api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class ListModelBundlesV2ModelBundlesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_model_bundles_v2_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def list_model_bundles_v2_model_bundles_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def list_model_bundles_v2_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def list_model_bundles_v2_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_bundles_v2_model_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_bundles_v2_model_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_model_bundles/post.py b/launch/api_client/paths/v2_model_bundles/post.py deleted file mode 100644 index 995ad152..00000000 --- a/launch/api_client/paths/v2_model_bundles/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.create_model_bundle_v2_request import ( - CreateModelBundleV2Request, -) -from launch.api_client.model.create_model_bundle_v2_response import ( - CreateModelBundleV2Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . 
import path - -# body param -SchemaForRequestBodyApplicationJson = CreateModelBundleV2Request - - -request_body_create_model_bundle_v2_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV2Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_create_model_bundle_v2_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - 
api_response=api_response - ) - - return api_response - - -class CreateModelBundleV2ModelBundlesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_bundle_v2_model_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_bundle_v2_model_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_model_bundles_clone_with_changes/__init__.py b/launch/api_client/paths/v2_model_bundles_clone_with_changes/__init__.py deleted file mode 100644 index ac407cd6..00000000 --- a/launch/api_client/paths/v2_model_bundles_clone_with_changes/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v2_model_bundles_clone_with_changes import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V2_MODELBUNDLES_CLONEWITHCHANGES \ No newline at end of file diff --git a/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.py b/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.py deleted file mode 100644 index e09e518a..00000000 --- a/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import 
urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.clone_model_bundle_v2_request import ( - CloneModelBundleV2Request, -) -from launch.api_client.model.create_model_bundle_v2_response import ( - CreateModelBundleV2Response, -) -from launch.api_client.model.http_validation_error import HTTPValidationError - -from . import path - -# body param -SchemaForRequestBodyApplicationJson = CloneModelBundleV2Request - - -request_body_clone_model_bundle_v2_request = api_client.RequestBody( - content={ - 'application/json': api_client.MediaType( - schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV2Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} -_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def 
_clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Clone Model Bundle With Changes - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - 'The required body parameter has an invalid value of: unset. Set a valid value instead') - _fields = None - _body = None - serialized_data = request_body_clone_model_bundle_v2_request.serialize(body, content_type) - _headers.add('Content-Type', content_type) - if 'fields' in serialized_data: - _fields = serialized_data['fields'] - elif 'body' in serialized_data: - _body = serialized_data['body'] - response = self.api_client.call_api( - resource_path=used_path, - method='post'.upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - 
status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class CloneModelBundleWithChangesV2ModelBundlesCloneWithChangesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- - @typing.overload - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... - - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... 
- - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = 'application/json', - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_model_bundles_latest/__init__.py b/launch/api_client/paths/v2_model_bundles_latest/__init__.py deleted file mode 100644 index 190c18d6..00000000 --- a/launch/api_client/paths/v2_model_bundles_latest/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v2_model_bundles_latest import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V2_MODELBUNDLES_LATEST \ No newline at end of file diff --git a/launch/api_client/paths/v2_model_bundles_latest/get.py b/launch/api_client/paths/v2_model_bundles_latest/get.py deleted file mode 100644 index 54ee64b1..00000000 --- a/launch/api_client/paths/v2_model_bundles_latest/get.py +++ /dev/null @@ -1,319 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from 
launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.model_bundle_v2_response import ( - ModelBundleV2Response, -) - -from . import path - -# Query params -ModelNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - 'RequestRequiredQueryParams', - { - 'model_name': typing.Union[ModelNameSchema, str, ], - } -) -RequestOptionalQueryParams = typing_extensions.TypedDict( - 'RequestOptionalQueryParams', - { - }, - total=False -) - - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - - -request_query_model_name = api_client.QueryParameter( - name="model_name", - style=api_client.ParameterStyle.FORM, - schema=ModelNameSchema, - required=True, - explode=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ModelBundleV2Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} 
-_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Latest Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_name, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - 
status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class GetLatestModelBundleV2ModelBundlesLatestGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_latest_model_bundle_v2_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_latest_model_bundle_v2_model_bundles_latest_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_latest_model_bundle_v2_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_latest_model_bundle_v2_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/paths/v2_model_bundles_model_bundle_id/__init__.py b/launch/api_client/paths/v2_model_bundles_model_bundle_id/__init__.py deleted file mode 100644 index 783f2147..00000000 --- a/launch/api_client/paths/v2_model_bundles_model_bundle_id/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# do not import all endpoints into this module because that uses a lot of memory and stack frames -# if you need the ability to import all endpoints from this module, import them with -# from launch.api_client.paths.v2_model_bundles_model_bundle_id import Api - -from launch.api_client.paths import PathValues - -path = PathValues.V2_MODELBUNDLES_MODEL_BUNDLE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.py b/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.py deleted file mode 100644 index 91c9f870..00000000 --- a/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from 
launch.api_client import schemas # noqa: F401 -from launch.api_client import api_client, exceptions -from launch.api_client.model.http_validation_error import HTTPValidationError -from launch.api_client.model.model_bundle_v2_response import ( - ModelBundleV2Response, -) - -from . import path - -# Path params -ModelBundleIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 'RequestRequiredPathParams', - { - 'model_bundle_id': typing.Union[ModelBundleIdSchema, str, ], - } -) -RequestOptionalPathParams = typing_extensions.TypedDict( - 'RequestOptionalPathParams', - { - }, - total=False -) - - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - - -request_path_model_bundle_id = api_client.PathParameter( - name="model_bundle_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelBundleIdSchema, - required=True, -) -_auth = [ - 'OAuth2PasswordBearer', - 'HTTPBasic', -] -SchemaFor200ResponseBodyApplicationJson = ModelBundleV2Response - - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor200ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[ - SchemaFor422ResponseBodyApplicationJson, - ] - headers: schemas.Unset = schemas.unset - - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - 'application/json': api_client.MediaType( - schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_status_code_to_response = { - '200': _response_for_200, - '422': _response_for_422, -} 
-_all_accept_content_types = ( - 'application/json', -) - - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in ( - request_path_model_bundle_id, - ): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace('{%s}' % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add('Accept', accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method='get'.upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response - ) - - return api_response - - -class 
GetModelBundleV2ModelBundlesModelBundleIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_bundle_v2_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get_model_bundle_v2_model_bundles_model_bundle_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get_model_bundle_v2_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get_model_bundle_v2_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ - ApiResponseFor200, - ]: ... - - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ - ApiResponseFor200, - api_client.ApiResponseWithoutDeserialization, - ]: ... 
- - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization - ) - - diff --git a/launch/api_client/py.typed b/launch/api_client/py.typed deleted file mode 100644 index e69de29b..00000000 diff --git a/launch/api_client/rest.py b/launch/api_client/rest.py deleted file mode 100644 index b09e2a34..00000000 --- a/launch/api_client/rest.py +++ /dev/null @@ -1,253 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import logging -import ssl -import typing -from urllib.parse import urlencode - -import certifi -import urllib3 -from urllib3._collections import HTTPHeaderDict - -from launch.api_client.exceptions import ApiException, ApiValueError - -logger = logging.getLogger(__name__) - - -class RESTClientObject(object): - - def __init__(self, configuration, pools_size=4, maxsize=None): - # urllib3.PoolManager will pass all kw parameters to connectionpool - # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501 - # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501 - # maxsize is the number of requests to host that are allowed in parallel # noqa: E501 - # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501 - - # 
cert_reqs - if configuration.verify_ssl: - cert_reqs = ssl.CERT_REQUIRED - else: - cert_reqs = ssl.CERT_NONE - - # ca_certs - if configuration.ssl_ca_cert: - ca_certs = configuration.ssl_ca_cert - else: - # if not set certificate file, use Mozilla's root certificates. - ca_certs = certifi.where() - - addition_pool_args = {} - if configuration.assert_hostname is not None: - addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501 - - if configuration.retries is not None: - addition_pool_args['retries'] = configuration.retries - - if configuration.socket_options is not None: - addition_pool_args['socket_options'] = configuration.socket_options - - if maxsize is None: - if configuration.connection_pool_maxsize is not None: - maxsize = configuration.connection_pool_maxsize - else: - maxsize = 4 - - # https pool manager - if configuration.proxy: - self.pool_manager = urllib3.ProxyManager( - num_pools=pools_size, - maxsize=maxsize, - cert_reqs=cert_reqs, - ca_certs=ca_certs, - cert_file=configuration.cert_file, - key_file=configuration.key_file, - proxy_url=configuration.proxy, - proxy_headers=configuration.proxy_headers, - **addition_pool_args - ) - else: - self.pool_manager = urllib3.PoolManager( - num_pools=pools_size, - maxsize=maxsize, - cert_reqs=cert_reqs, - ca_certs=ca_certs, - cert_file=configuration.cert_file, - key_file=configuration.key_file, - **addition_pool_args - ) - - def request( - self, - method: str, - url: str, - headers: typing.Optional[HTTPHeaderDict] = None, - fields: typing.Optional[typing.Tuple[typing.Tuple[str, typing.Any], ...]] = None, - body: typing.Optional[typing.Union[str, bytes]] = None, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> urllib3.HTTPResponse: - """Perform requests. 
- - :param method: http request method - :param url: http request url - :param headers: http request headers - :param body: request body, for other types - :param fields: request parameters for - `application/x-www-form-urlencoded` - or `multipart/form-data` - :param stream: if True, the urllib3.HTTPResponse object will - be returned without reading/decoding response - data. Default is False. - :param timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - """ - method = method.upper() - assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', - 'PATCH', 'OPTIONS'] - - if fields and body: - raise ApiValueError( - "body parameter cannot be used with fields parameter." - ) - - fields = fields or {} - headers = headers or {} - - if timeout: - if isinstance(timeout, (int, float)): # noqa: E501,F821 - timeout = urllib3.Timeout(total=timeout) - elif (isinstance(timeout, tuple) and - len(timeout) == 2): - timeout = urllib3.Timeout(connect=timeout[0], read=timeout[1]) - - try: - # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE` - if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']: - if 'Content-Type' not in headers and body is None: - r = self.pool_manager.request( - method, - url, - preload_content=not stream, - timeout=timeout, - headers=headers - ) - elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501 - r = self.pool_manager.request( - method, url, - body=body, - fields=fields, - encode_multipart=False, - preload_content=not stream, - timeout=timeout, - headers=headers) - elif headers['Content-Type'] == 'multipart/form-data': - # must del headers['Content-Type'], or the correct - # Content-Type which generated by urllib3 will be - # overwritten. 
- del headers['Content-Type'] - r = self.pool_manager.request( - method, url, - fields=fields, - encode_multipart=True, - preload_content=not stream, - timeout=timeout, - headers=headers) - # Pass a `string` parameter directly in the body to support - # other content types than Json when `body` argument is - # provided in serialized form - elif isinstance(body, str) or isinstance(body, bytes): - request_body = body - r = self.pool_manager.request( - method, url, - body=request_body, - preload_content=not stream, - timeout=timeout, - headers=headers) - else: - # Cannot generate the request from given parameters - msg = """Cannot prepare a request message for provided - arguments. Please check that your arguments match - declared content type.""" - raise ApiException(status=0, reason=msg) - # For `GET`, `HEAD` - else: - r = self.pool_manager.request(method, url, - preload_content=not stream, - timeout=timeout, - headers=headers) - except urllib3.exceptions.SSLError as e: - msg = "{0}\n{1}".format(type(e).__name__, str(e)) - raise ApiException(status=0, reason=msg) - - if not stream: - # log response body - logger.debug("response body: %s", r.data) - - return r - - def GET(self, url, headers=None, stream=False, - timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("GET", url, - headers=headers, - stream=stream, - timeout=timeout, - fields=fields) - - def HEAD(self, url, headers=None, stream=False, - timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("HEAD", url, - headers=headers, - stream=stream, - timeout=timeout, - fields=fields) - - def OPTIONS(self, url, headers=None, - body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("OPTIONS", url, - headers=headers, - stream=stream, - timeout=timeout, - body=body, fields=fields) - - def DELETE(self, url, headers=None, body=None, - stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("DELETE", url, - 
headers=headers, - stream=stream, - timeout=timeout, - body=body, fields=fields) - - def POST(self, url, headers=None, - body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("POST", url, - headers=headers, - stream=stream, - timeout=timeout, - body=body, fields=fields) - - def PUT(self, url, headers=None, - body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("PUT", url, - headers=headers, - stream=stream, - timeout=timeout, - body=body, fields=fields) - - def PATCH(self, url, headers=None, - body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("PATCH", url, - headers=headers, - stream=stream, - timeout=timeout, - body=body, fields=fields) diff --git a/launch/api_client/schemas.py b/launch/api_client/schemas.py deleted file mode 100644 index b69a624f..00000000 --- a/launch/api_client/schemas.py +++ /dev/null @@ -1,2470 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal -import functools -import io -import re -import types -import typing -import uuid -from collections import defaultdict -from datetime import date, datetime, timedelta # noqa: F401 - -import frozendict -from dateutil.parser.isoparser import _takes_ascii, isoparser - -from launch.api_client.configuration import Configuration -from launch.api_client.exceptions import ApiTypeError, ApiValueError - - -class Unset(object): - """ - An instance of this class is set as the default value for object type(dict) properties that are optional - When a property has an unset value, that property will not be assigned in the dict - """ - pass - -unset = Unset() - -none_type = type(None) -file_type = io.IOBase - - -class FileIO(io.FileIO): - """ - A class 
for storing files - Note: this class is not immutable - """ - - def __new__(cls, _arg: typing.Union[io.FileIO, io.BufferedReader]): - if isinstance(_arg, (io.FileIO, io.BufferedReader)): - if _arg.closed: - raise ApiValueError('Invalid file state; file is closed and must be open') - _arg.close() - inst = super(FileIO, cls).__new__(cls, _arg.name) - super(FileIO, inst).__init__(_arg.name) - return inst - raise ApiValueError('FileIO must be passed _arg which contains the open file') - - def __init__(self, _arg: typing.Union[io.FileIO, io.BufferedReader]): - pass - - -def update(d: dict, u: dict): - """ - Adds u to d - Where each dict is defaultdict(set) - """ - if not u: - return d - for k, v in u.items(): - if k not in d: - d[k] = v - else: - d[k] = d[k] | v - - -class ValidationMetadata(frozendict.frozendict): - """ - A class storing metadata that is needed to validate OpenApi Schema payloads - """ - def __new__( - cls, - path_to_item: typing.Tuple[typing.Union[str, int], ...] = tuple(['args[0]']), - from_server: bool = False, - configuration: typing.Optional[Configuration] = None, - seen_classes: typing.FrozenSet[typing.Type] = frozenset(), - validated_path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type]] = frozendict.frozendict() - ): - """ - Args: - path_to_item: the path to the current data being instantiated. 
- For {'a': [1]} if the code is handling, 1, then the path is ('args[0]', 'a', 0) - This changes from location to location - from_server: whether or not this data came form the server - True when receiving server data - False when instantiating model with client side data not form the server - This does not change from location to location - configuration: the Configuration instance to use - This is needed because in Configuration: - - one can disable validation checking - This does not change from location to location - seen_classes: when deserializing data that matches multiple schemas, this is used to store - the schemas that have been traversed. This is used to stop processing when a cycle is seen. - This changes from location to location - validated_path_to_schemas: stores the already validated schema classes for a given path location - This does not change from location to location - """ - return super().__new__( - cls, - path_to_item=path_to_item, - from_server=from_server, - configuration=configuration, - seen_classes=seen_classes, - validated_path_to_schemas=validated_path_to_schemas - ) - - def validation_ran_earlier(self, cls: type) -> bool: - validated_schemas = self.validated_path_to_schemas.get(self.path_to_item, set()) - validation_ran_earlier = validated_schemas and cls in validated_schemas - if validation_ran_earlier: - return True - if cls in self.seen_classes: - return True - return False - - @property - def path_to_item(self) -> typing.Tuple[typing.Union[str, int], ...]: - return self.get('path_to_item') - - @property - def from_server(self) -> bool: - return self.get('from_server') - - @property - def configuration(self) -> typing.Optional[Configuration]: - return self.get('configuration') - - @property - def seen_classes(self) -> typing.FrozenSet[typing.Type]: - return self.get('seen_classes') - - @property - def validated_path_to_schemas(self) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type]]: - return 
self.get('validated_path_to_schemas') - - -def add_deeper_validated_schemas(validation_metadata: ValidationMetadata, path_to_schemas: dict): - # this is called if validation_ran_earlier and current and deeper locations need to be added - current_path_to_item = validation_metadata.path_to_item - other_path_to_schemas = {} - for path_to_item, schemas in validation_metadata.validated_path_to_schemas.items(): - if len(path_to_item) < len(current_path_to_item): - continue - path_begins_with_current_path = path_to_item[:len(current_path_to_item)] == current_path_to_item - if path_begins_with_current_path: - other_path_to_schemas[path_to_item] = schemas - update(path_to_schemas, other_path_to_schemas) - - -class Singleton: - """ - Enums and singletons are the same - The same instance is returned for a given key of (cls, _arg) - """ - _instances = {} - - def __new__(cls, _arg: typing.Any, **kwargs): - """ - cls base classes: BoolClass, NoneClass, str, decimal.Decimal - The 3rd key is used in the tuple below for a corner case where an enum contains integer 1 - However 1.0 can also be ingested into that enum schema because 1.0 == 1 and - Decimal('1.0') == Decimal('1') - But if we omitted the 3rd value in the key, then Decimal('1.0') would be stored as Decimal('1') - and json serializing that instance would be '1' rather than the expected '1.0' - Adding the 3rd value, the str of _arg ensures that 1.0 -> Decimal('1.0') which is serialized as 1.0 - """ - key = (cls, _arg, str(_arg)) - if key not in cls._instances: - if isinstance(_arg, (none_type, bool, BoolClass, NoneClass)): - inst = super().__new__(cls) - cls._instances[key] = inst - else: - cls._instances[key] = super().__new__(cls, _arg) - return cls._instances[key] - - def __repr__(self): - if isinstance(self, NoneClass): - return f'<{self.__class__.__name__}: None>' - elif isinstance(self, BoolClass): - if bool(self): - return f'<{self.__class__.__name__}: True>' - return f'<{self.__class__.__name__}: False>' - return 
f'<{self.__class__.__name__}: {super().__repr__()}>' - - -class classproperty: - - def __init__(self, fget): - self.fget = fget - - def __get__(self, owner_self, owner_cls): - return self.fget(owner_cls) - - -class NoneClass(Singleton): - @classproperty - def NONE(cls): - return cls(None) - - def __bool__(self) -> bool: - return False - - -class BoolClass(Singleton): - @classproperty - def TRUE(cls): - return cls(True) - - @classproperty - def FALSE(cls): - return cls(False) - - @functools.lru_cache() - def __bool__(self) -> bool: - for key, instance in self._instances.items(): - if self is instance: - return bool(key[1]) - raise ValueError('Unable to find the boolean value of this instance') - - -class MetaOapgTyped: - exclusive_maximum: typing.Union[int, float] - inclusive_maximum: typing.Union[int, float] - exclusive_minimum: typing.Union[int, float] - inclusive_minimum: typing.Union[int, float] - max_items: int - min_items: int - discriminator: typing.Dict[str, typing.Dict[str, typing.Type['Schema']]] - - - class properties: - # to hold object properties - pass - - additional_properties: typing.Optional[typing.Type['Schema']] - max_properties: int - min_properties: int - all_of: typing.List[typing.Type['Schema']] - one_of: typing.List[typing.Type['Schema']] - any_of: typing.List[typing.Type['Schema']] - not_schema: typing.Type['Schema'] - max_length: int - min_length: int - items: typing.Type['Schema'] - - -class Schema: - """ - the base class of all swagger/openapi schemas/models - """ - __inheritable_primitive_types_set = {decimal.Decimal, str, tuple, frozendict.frozendict, FileIO, bytes, BoolClass, NoneClass} - _types: typing.Set[typing.Type] - MetaOapg = MetaOapgTyped - - @staticmethod - def __get_valid_classes_phrase(input_classes): - """Returns a string phrase describing what types are allowed""" - all_classes = list(input_classes) - all_classes = sorted(all_classes, key=lambda cls: cls.__name__) - all_class_names = [cls.__name__ for cls in all_classes] - 
if len(all_class_names) == 1: - return "is {0}".format(all_class_names[0]) - return "is one of [{0}]".format(", ".join(all_class_names)) - - @staticmethod - def _get_class_oapg(item_cls: typing.Union[types.FunctionType, staticmethod, typing.Type['Schema']]) -> typing.Type['Schema']: - if isinstance(item_cls, types.FunctionType): - # referenced schema - return item_cls() - elif isinstance(item_cls, staticmethod): - # referenced schema - return item_cls.__func__() - return item_cls - - @classmethod - def __type_error_message( - cls, var_value=None, var_name=None, valid_classes=None, key_type=None - ): - """ - Keyword Args: - var_value (any): the variable which has the type_error - var_name (str): the name of the variable which has the typ error - valid_classes (tuple): the accepted classes for current_item's - value - key_type (bool): False if our value is a value in a dict - True if it is a key in a dict - False if our item is an item in a tuple - """ - key_or_value = "value" - if key_type: - key_or_value = "key" - valid_classes_phrase = cls.__get_valid_classes_phrase(valid_classes) - msg = "Invalid type. 
Required {1} type {2} and " "passed type was {3}".format( - var_name, - key_or_value, - valid_classes_phrase, - type(var_value).__name__, - ) - return msg - - @classmethod - def __get_type_error(cls, var_value, path_to_item, valid_classes, key_type=False): - error_msg = cls.__type_error_message( - var_name=path_to_item[-1], - var_value=var_value, - valid_classes=valid_classes, - key_type=key_type, - ) - return ApiTypeError( - error_msg, - path_to_item=path_to_item, - valid_classes=valid_classes, - key_type=key_type, - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]: - """ - Schema _validate_oapg - All keyword validation except for type checking was done in calling stack frames - If those validations passed, the validated classes are collected in path_to_schemas - - Returns: - path_to_schemas: a map of path to schemas - - Raises: - ApiValueError: when a string can't be converted into a date or datetime and it must be one of those classes - ApiTypeError: when the input type is not in the list of allowed spec types - """ - base_class = type(arg) - if base_class not in cls._types: - raise cls.__get_type_error( - arg, - validation_metadata.path_to_item, - cls._types, - key_type=False, - ) - - path_to_schemas = {validation_metadata.path_to_item: set()} - path_to_schemas[validation_metadata.path_to_item].add(cls) - path_to_schemas[validation_metadata.path_to_item].add(base_class) - return path_to_schemas - - @staticmethod - def _process_schema_classes_oapg( - schema_classes: typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]] - ): - """ - Processes and mutates schema_classes - If a SomeSchema is a subclass of DictSchema then remove DictSchema because it is already included - """ - if 
len(schema_classes) < 2: - return - if len(schema_classes) > 2 and UnsetAnyTypeSchema in schema_classes: - schema_classes.remove(UnsetAnyTypeSchema) - x_schema = schema_type_classes & schema_classes - if not x_schema: - return - x_schema = x_schema.pop() - if any(c is not x_schema and issubclass(c, x_schema) for c in schema_classes): - # needed to not have a mro error in get_new_class - schema_classes.remove(x_schema) - - @classmethod - def __get_new_cls( - cls, - arg, - validation_metadata: ValidationMetadata - ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']]: - """ - Make a new dynamic class and return an instance of that class - We are making an instance of cls, but instead of making cls - make a new class, new_cls - which includes dynamic bases including cls - return an instance of that new class - - Dict property + List Item Assignment Use cases: - 1. value is NOT an instance of the required schema class - the value is validated by _validate_oapg - _validate_oapg returns a key value pair - where the key is the path to the item, and the value will be the required manufactured class - made out of the matching schemas - 2. 
value is an instance of the correct schema type - the value is NOT validated by _validate_oapg, _validate_oapg only checks that the instance is of the correct schema type - for this value, _validate_oapg does NOT return an entry for it in _path_to_schemas - and in list/dict _get_items_oapg,_get_properties_oapg the value will be directly assigned - because value is of the correct type, and validation was run earlier when the instance was created - """ - _path_to_schemas = {} - if validation_metadata.validation_ran_earlier(cls): - add_deeper_validated_schemas(validation_metadata, _path_to_schemas) - else: - other_path_to_schemas = cls._validate_oapg(arg, validation_metadata=validation_metadata) - update(_path_to_schemas, other_path_to_schemas) - # loop through it make a new class for each entry - # do not modify the returned result because it is cached and we would be modifying the cached value - path_to_schemas = {} - for path, schema_classes in _path_to_schemas.items(): - """ - Use cases - 1. N number of schema classes + enum + type != bool/None, classes in path_to_schemas: tuple/frozendict.frozendict/str/Decimal/bytes/FileIo - needs Singleton added - 2. N number of schema classes + enum + type == bool/None, classes in path_to_schemas: BoolClass/NoneClass - Singleton already added - 3. 
N number of schema classes, classes in path_to_schemas: BoolClass/NoneClass/tuple/frozendict.frozendict/str/Decimal/bytes/FileIo - """ - cls._process_schema_classes_oapg(schema_classes) - enum_schema = any( - issubclass(this_cls, EnumBase) for this_cls in schema_classes) - inheritable_primitive_type = schema_classes.intersection(cls.__inheritable_primitive_types_set) - chosen_schema_classes = schema_classes - inheritable_primitive_type - suffix = tuple(inheritable_primitive_type) - if enum_schema and suffix[0] not in {NoneClass, BoolClass}: - suffix = (Singleton,) + suffix - - used_classes = tuple(sorted(chosen_schema_classes, key=lambda a_cls: a_cls.__name__)) + suffix - mfg_cls = get_new_class(class_name='DynamicSchema', bases=used_classes) - path_to_schemas[path] = mfg_cls - - return path_to_schemas - - @classmethod - def _get_new_instance_without_conversion_oapg( - cls, - arg: typing.Any, - path_to_item: typing.Tuple[typing.Union[str, int], ...], - path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']] - ): - # We have a Dynamic class and we are making an instance of it - if issubclass(cls, frozendict.frozendict) and issubclass(cls, DictBase): - properties = cls._get_properties_oapg(arg, path_to_item, path_to_schemas) - return super(Schema, cls).__new__(cls, properties) - elif issubclass(cls, tuple) and issubclass(cls, ListBase): - items = cls._get_items_oapg(arg, path_to_item, path_to_schemas) - return super(Schema, cls).__new__(cls, items) - """ - str = openapi str, date, and datetime - decimal.Decimal = openapi int and float - FileIO = openapi binary type and the user inputs a file - bytes = openapi binary type and the user inputs bytes - """ - return super(Schema, cls).__new__(cls, arg) - - @classmethod - def from_openapi_data_oapg( - cls, - arg: typing.Union[ - str, - date, - datetime, - int, - float, - decimal.Decimal, - bool, - None, - 'Schema', - dict, - frozendict.frozendict, - tuple, - list, - io.FileIO, - 
io.BufferedReader, - bytes - ], - _configuration: typing.Optional[Configuration] - ): - """ - Schema from_openapi_data_oapg - """ - from_server = True - validated_path_to_schemas = {} - arg = cast_to_allowed_types(arg, from_server, validated_path_to_schemas) - validation_metadata = ValidationMetadata( - from_server=from_server, configuration=_configuration, validated_path_to_schemas=validated_path_to_schemas) - path_to_schemas = cls.__get_new_cls(arg, validation_metadata) - new_cls = path_to_schemas[validation_metadata.path_to_item] - new_inst = new_cls._get_new_instance_without_conversion_oapg( - arg, - validation_metadata.path_to_item, - path_to_schemas - ) - return new_inst - - @staticmethod - def __get_input_dict(*args, **kwargs) -> frozendict.frozendict: - input_dict = {} - if args and isinstance(args[0], (dict, frozendict.frozendict)): - input_dict.update(args[0]) - if kwargs: - input_dict.update(kwargs) - return frozendict.frozendict(input_dict) - - @staticmethod - def __remove_unsets(kwargs): - return {key: val for key, val in kwargs.items() if val is not unset} - - def __new__(cls, *_args: typing.Union[dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema'], _configuration: typing.Optional[Configuration] = None, **kwargs: typing.Union[dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema', Unset]): - """ - Schema __new__ - - Args: - _args (int/float/decimal.Decimal/str/list/tuple/dict/frozendict.frozendict/bool/None): the value - kwargs (str, int/float/decimal.Decimal/str/list/tuple/dict/frozendict.frozendict/bool/None): dict values - _configuration: contains the Configuration that enables json schema validation keywords - like minItems, minLength etc - - Note: double underscores are used here because pycharm thinks that these variables - are instance properties if they are named normally :( - """ - __kwargs = cls.__remove_unsets(kwargs) - if 
not _args and not __kwargs: - raise TypeError( - 'No input given. args or kwargs must be given.' - ) - if not __kwargs and _args and not isinstance(_args[0], dict): - __arg = _args[0] - else: - __arg = cls.__get_input_dict(*_args, **__kwargs) - __from_server = False - __validated_path_to_schemas = {} - __arg = cast_to_allowed_types( - __arg, __from_server, __validated_path_to_schemas) - __validation_metadata = ValidationMetadata( - configuration=_configuration, from_server=__from_server, validated_path_to_schemas=__validated_path_to_schemas) - __path_to_schemas = cls.__get_new_cls(__arg, __validation_metadata) - __new_cls = __path_to_schemas[__validation_metadata.path_to_item] - return __new_cls._get_new_instance_without_conversion_oapg( - __arg, - __validation_metadata.path_to_item, - __path_to_schemas - ) - - def __init__( - self, - *_args: typing.Union[ - dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema'], - _configuration: typing.Optional[Configuration] = None, - **kwargs: typing.Union[ - dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema', Unset - ] - ): - """ - this is needed to fix 'Unexpected argument' warning in pycharm - this code does nothing because all Schema instances are immutable - this means that all input data is passed into and used in new, and after the new instance is made - no new attributes are assigned and init is not used - """ - pass - -""" -import itertools -data_types = ('None', 'FrozenDict', 'Tuple', 'Str', 'Decimal', 'Bool') -type_to_cls = { - 'None': 'NoneClass', - 'FrozenDict': 'frozendict.frozendict', - 'Tuple': 'tuple', - 'Str': 'str', - 'Decimal': 'decimal.Decimal', - 'Bool': 'BoolClass' -} -cls_tuples = [v for v in itertools.combinations(data_types, 5)] -typed_classes = [f"class {''.join(cls_tuple)}Mixin({', '.join(type_to_cls[typ] for typ in cls_tuple)}):\n pass" for cls_tuple in cls_tuples] -for cls in 
typed_classes: - print(cls) -object_classes = [f"{''.join(cls_tuple)}Mixin = object" for cls_tuple in cls_tuples] -for cls in object_classes: - print(cls) -""" -if typing.TYPE_CHECKING: - # qty 1 - NoneMixin = NoneClass - FrozenDictMixin = frozendict.frozendict - TupleMixin = tuple - StrMixin = str - DecimalMixin = decimal.Decimal - BoolMixin = BoolClass - BytesMixin = bytes - FileMixin = FileIO - # qty 2 - class BinaryMixin(bytes, FileIO): - pass - class NoneFrozenDictMixin(NoneClass, frozendict.frozendict): - pass - class NoneTupleMixin(NoneClass, tuple): - pass - class NoneStrMixin(NoneClass, str): - pass - class NoneDecimalMixin(NoneClass, decimal.Decimal): - pass - class NoneBoolMixin(NoneClass, BoolClass): - pass - class FrozenDictTupleMixin(frozendict.frozendict, tuple): - pass - class FrozenDictStrMixin(frozendict.frozendict, str): - pass - class FrozenDictDecimalMixin(frozendict.frozendict, decimal.Decimal): - pass - class FrozenDictBoolMixin(frozendict.frozendict, BoolClass): - pass - class TupleStrMixin(tuple, str): - pass - class TupleDecimalMixin(tuple, decimal.Decimal): - pass - class TupleBoolMixin(tuple, BoolClass): - pass - class StrDecimalMixin(str, decimal.Decimal): - pass - class StrBoolMixin(str, BoolClass): - pass - class DecimalBoolMixin(decimal.Decimal, BoolClass): - pass - # qty 3 - class NoneFrozenDictTupleMixin(NoneClass, frozendict.frozendict, tuple): - pass - class NoneFrozenDictStrMixin(NoneClass, frozendict.frozendict, str): - pass - class NoneFrozenDictDecimalMixin(NoneClass, frozendict.frozendict, decimal.Decimal): - pass - class NoneFrozenDictBoolMixin(NoneClass, frozendict.frozendict, BoolClass): - pass - class NoneTupleStrMixin(NoneClass, tuple, str): - pass - class NoneTupleDecimalMixin(NoneClass, tuple, decimal.Decimal): - pass - class NoneTupleBoolMixin(NoneClass, tuple, BoolClass): - pass - class NoneStrDecimalMixin(NoneClass, str, decimal.Decimal): - pass - class NoneStrBoolMixin(NoneClass, str, BoolClass): - pass - class 
NoneDecimalBoolMixin(NoneClass, decimal.Decimal, BoolClass): - pass - class FrozenDictTupleStrMixin(frozendict.frozendict, tuple, str): - pass - class FrozenDictTupleDecimalMixin(frozendict.frozendict, tuple, decimal.Decimal): - pass - class FrozenDictTupleBoolMixin(frozendict.frozendict, tuple, BoolClass): - pass - class FrozenDictStrDecimalMixin(frozendict.frozendict, str, decimal.Decimal): - pass - class FrozenDictStrBoolMixin(frozendict.frozendict, str, BoolClass): - pass - class FrozenDictDecimalBoolMixin(frozendict.frozendict, decimal.Decimal, BoolClass): - pass - class TupleStrDecimalMixin(tuple, str, decimal.Decimal): - pass - class TupleStrBoolMixin(tuple, str, BoolClass): - pass - class TupleDecimalBoolMixin(tuple, decimal.Decimal, BoolClass): - pass - class StrDecimalBoolMixin(str, decimal.Decimal, BoolClass): - pass - # qty 4 - class NoneFrozenDictTupleStrMixin(NoneClass, frozendict.frozendict, tuple, str): - pass - class NoneFrozenDictTupleDecimalMixin(NoneClass, frozendict.frozendict, tuple, decimal.Decimal): - pass - class NoneFrozenDictTupleBoolMixin(NoneClass, frozendict.frozendict, tuple, BoolClass): - pass - class NoneFrozenDictStrDecimalMixin(NoneClass, frozendict.frozendict, str, decimal.Decimal): - pass - class NoneFrozenDictStrBoolMixin(NoneClass, frozendict.frozendict, str, BoolClass): - pass - class NoneFrozenDictDecimalBoolMixin(NoneClass, frozendict.frozendict, decimal.Decimal, BoolClass): - pass - class NoneTupleStrDecimalMixin(NoneClass, tuple, str, decimal.Decimal): - pass - class NoneTupleStrBoolMixin(NoneClass, tuple, str, BoolClass): - pass - class NoneTupleDecimalBoolMixin(NoneClass, tuple, decimal.Decimal, BoolClass): - pass - class NoneStrDecimalBoolMixin(NoneClass, str, decimal.Decimal, BoolClass): - pass - class FrozenDictTupleStrDecimalMixin(frozendict.frozendict, tuple, str, decimal.Decimal): - pass - class FrozenDictTupleStrBoolMixin(frozendict.frozendict, tuple, str, BoolClass): - pass - class 
FrozenDictTupleDecimalBoolMixin(frozendict.frozendict, tuple, decimal.Decimal, BoolClass): - pass - class FrozenDictStrDecimalBoolMixin(frozendict.frozendict, str, decimal.Decimal, BoolClass): - pass - class TupleStrDecimalBoolMixin(tuple, str, decimal.Decimal, BoolClass): - pass - # qty 5 - class NoneFrozenDictTupleStrDecimalMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal): - pass - class NoneFrozenDictTupleStrBoolMixin(NoneClass, frozendict.frozendict, tuple, str, BoolClass): - pass - class NoneFrozenDictTupleDecimalBoolMixin(NoneClass, frozendict.frozendict, tuple, decimal.Decimal, BoolClass): - pass - class NoneFrozenDictStrDecimalBoolMixin(NoneClass, frozendict.frozendict, str, decimal.Decimal, BoolClass): - pass - class NoneTupleStrDecimalBoolMixin(NoneClass, tuple, str, decimal.Decimal, BoolClass): - pass - class FrozenDictTupleStrDecimalBoolMixin(frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass): - pass - # qty 6 - class NoneFrozenDictTupleStrDecimalBoolMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass): - pass - # qty 8 - class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass, FileIO, bytes): - pass -else: - # qty 1 - class NoneMixin: - _types = {NoneClass} - class FrozenDictMixin: - _types = {frozendict.frozendict} - class TupleMixin: - _types = {tuple} - class StrMixin: - _types = {str} - class DecimalMixin: - _types = {decimal.Decimal} - class BoolMixin: - _types = {BoolClass} - class BytesMixin: - _types = {bytes} - class FileMixin: - _types = {FileIO} - # qty 2 - class BinaryMixin: - _types = {bytes, FileIO} - class NoneFrozenDictMixin: - _types = {NoneClass, frozendict.frozendict} - class NoneTupleMixin: - _types = {NoneClass, tuple} - class NoneStrMixin: - _types = {NoneClass, str} - class NoneDecimalMixin: - _types = {NoneClass, decimal.Decimal} - class NoneBoolMixin: - _types = {NoneClass, BoolClass} - class 
FrozenDictTupleMixin: - _types = {frozendict.frozendict, tuple} - class FrozenDictStrMixin: - _types = {frozendict.frozendict, str} - class FrozenDictDecimalMixin: - _types = {frozendict.frozendict, decimal.Decimal} - class FrozenDictBoolMixin: - _types = {frozendict.frozendict, BoolClass} - class TupleStrMixin: - _types = {tuple, str} - class TupleDecimalMixin: - _types = {tuple, decimal.Decimal} - class TupleBoolMixin: - _types = {tuple, BoolClass} - class StrDecimalMixin: - _types = {str, decimal.Decimal} - class StrBoolMixin: - _types = {str, BoolClass} - class DecimalBoolMixin: - _types = {decimal.Decimal, BoolClass} - # qty 3 - class NoneFrozenDictTupleMixin: - _types = {NoneClass, frozendict.frozendict, tuple} - class NoneFrozenDictStrMixin: - _types = {NoneClass, frozendict.frozendict, str} - class NoneFrozenDictDecimalMixin: - _types = {NoneClass, frozendict.frozendict, decimal.Decimal} - class NoneFrozenDictBoolMixin: - _types = {NoneClass, frozendict.frozendict, BoolClass} - class NoneTupleStrMixin: - _types = {NoneClass, tuple, str} - class NoneTupleDecimalMixin: - _types = {NoneClass, tuple, decimal.Decimal} - class NoneTupleBoolMixin: - _types = {NoneClass, tuple, BoolClass} - class NoneStrDecimalMixin: - _types = {NoneClass, str, decimal.Decimal} - class NoneStrBoolMixin: - _types = {NoneClass, str, BoolClass} - class NoneDecimalBoolMixin: - _types = {NoneClass, decimal.Decimal, BoolClass} - class FrozenDictTupleStrMixin: - _types = {frozendict.frozendict, tuple, str} - class FrozenDictTupleDecimalMixin: - _types = {frozendict.frozendict, tuple, decimal.Decimal} - class FrozenDictTupleBoolMixin: - _types = {frozendict.frozendict, tuple, BoolClass} - class FrozenDictStrDecimalMixin: - _types = {frozendict.frozendict, str, decimal.Decimal} - class FrozenDictStrBoolMixin: - _types = {frozendict.frozendict, str, BoolClass} - class FrozenDictDecimalBoolMixin: - _types = {frozendict.frozendict, decimal.Decimal, BoolClass} - class TupleStrDecimalMixin: - 
_types = {tuple, str, decimal.Decimal} - class TupleStrBoolMixin: - _types = {tuple, str, BoolClass} - class TupleDecimalBoolMixin: - _types = {tuple, decimal.Decimal, BoolClass} - class StrDecimalBoolMixin: - _types = {str, decimal.Decimal, BoolClass} - # qty 4 - class NoneFrozenDictTupleStrMixin: - _types = {NoneClass, frozendict.frozendict, tuple, str} - class NoneFrozenDictTupleDecimalMixin: - _types = {NoneClass, frozendict.frozendict, tuple, decimal.Decimal} - class NoneFrozenDictTupleBoolMixin: - _types = {NoneClass, frozendict.frozendict, tuple, BoolClass} - class NoneFrozenDictStrDecimalMixin: - _types = {NoneClass, frozendict.frozendict, str, decimal.Decimal} - class NoneFrozenDictStrBoolMixin: - _types = {NoneClass, frozendict.frozendict, str, BoolClass} - class NoneFrozenDictDecimalBoolMixin: - _types = {NoneClass, frozendict.frozendict, decimal.Decimal, BoolClass} - class NoneTupleStrDecimalMixin: - _types = {NoneClass, tuple, str, decimal.Decimal} - class NoneTupleStrBoolMixin: - _types = {NoneClass, tuple, str, BoolClass} - class NoneTupleDecimalBoolMixin: - _types = {NoneClass, tuple, decimal.Decimal, BoolClass} - class NoneStrDecimalBoolMixin: - _types = {NoneClass, str, decimal.Decimal, BoolClass} - class FrozenDictTupleStrDecimalMixin: - _types = {frozendict.frozendict, tuple, str, decimal.Decimal} - class FrozenDictTupleStrBoolMixin: - _types = {frozendict.frozendict, tuple, str, BoolClass} - class FrozenDictTupleDecimalBoolMixin: - _types = {frozendict.frozendict, tuple, decimal.Decimal, BoolClass} - class FrozenDictStrDecimalBoolMixin: - _types = {frozendict.frozendict, str, decimal.Decimal, BoolClass} - class TupleStrDecimalBoolMixin: - _types = {tuple, str, decimal.Decimal, BoolClass} - # qty 5 - class NoneFrozenDictTupleStrDecimalMixin: - _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal} - class NoneFrozenDictTupleStrBoolMixin: - _types = {NoneClass, frozendict.frozendict, tuple, str, BoolClass} - class 
NoneFrozenDictTupleDecimalBoolMixin: - _types = {NoneClass, frozendict.frozendict, tuple, decimal.Decimal, BoolClass} - class NoneFrozenDictStrDecimalBoolMixin: - _types = {NoneClass, frozendict.frozendict, str, decimal.Decimal, BoolClass} - class NoneTupleStrDecimalBoolMixin: - _types = {NoneClass, tuple, str, decimal.Decimal, BoolClass} - class FrozenDictTupleStrDecimalBoolMixin: - _types = {frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass} - # qty 6 - class NoneFrozenDictTupleStrDecimalBoolMixin: - _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass} - # qty 8 - class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin: - _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass, FileIO, bytes} - - -class ValidatorBase: - @staticmethod - def _is_json_validation_enabled_oapg(schema_keyword, configuration=None): - """Returns true if JSON schema validation is enabled for the specified - validation keyword. This can be used to skip JSON schema structural validation - as requested in the configuration. - Note: the suffix _oapg stands for openapi python (experimental) generator and - it has been added to prevent collisions with other methods and properties - - Args: - schema_keyword (string): the name of a JSON schema validation keyword. - configuration (Configuration): the configuration class. 
- """ - - return (configuration is None or - not hasattr(configuration, '_disabled_client_side_validations') or - schema_keyword not in configuration._disabled_client_side_validations) - - @staticmethod - def _raise_validation_error_message_oapg(value, constraint_msg, constraint_value, path_to_item, additional_txt=""): - raise ApiValueError( - "Invalid value `{value}`, {constraint_msg} `{constraint_value}`{additional_txt} at {path_to_item}".format( - value=value, - constraint_msg=constraint_msg, - constraint_value=constraint_value, - additional_txt=additional_txt, - path_to_item=path_to_item, - ) - ) - - -class EnumBase: - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]: - """ - EnumBase _validate_oapg - Validates that arg is in the enum's allowed values - """ - try: - cls.MetaOapg.enum_value_to_name[arg] - except KeyError: - raise ApiValueError("Invalid value {} passed in to {}, allowed_values={}".format(arg, cls, cls.MetaOapg.enum_value_to_name.keys())) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class BoolBase: - def is_true_oapg(self) -> bool: - """ - A replacement for x is True - True if the instance is a BoolClass True Singleton - """ - if not issubclass(self.__class__, BoolClass): - return False - return bool(self) - - def is_false_oapg(self) -> bool: - """ - A replacement for x is False - True if the instance is a BoolClass False Singleton - """ - if not issubclass(self.__class__, BoolClass): - return False - return bool(self) is False - - -class NoneBase: - def is_none_oapg(self) -> bool: - """ - A replacement for x is None - True if the instance is a NoneClass None Singleton - """ - if issubclass(self.__class__, NoneClass): - return True - return False - - -class StrBase(ValidatorBase): - MetaOapg: 
MetaOapgTyped - - @property - def as_str_oapg(self) -> str: - return self - - @property - def as_date_oapg(self) -> date: - raise Exception('not implemented') - - @property - def as_datetime_oapg(self) -> datetime: - raise Exception('not implemented') - - @property - def as_decimal_oapg(self) -> decimal.Decimal: - raise Exception('not implemented') - - @property - def as_uuid_oapg(self) -> uuid.UUID: - raise Exception('not implemented') - - @classmethod - def __check_str_validations( - cls, - arg: str, - validation_metadata: ValidationMetadata - ): - if not hasattr(cls, 'MetaOapg'): - return - if (cls._is_json_validation_enabled_oapg('maxLength', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'max_length') and - len(arg) > cls.MetaOapg.max_length): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="length must be less than or equal to", - constraint_value=cls.MetaOapg.max_length, - path_to_item=validation_metadata.path_to_item - ) - - if (cls._is_json_validation_enabled_oapg('minLength', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'min_length') and - len(arg) < cls.MetaOapg.min_length): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="length must be greater than or equal to", - constraint_value=cls.MetaOapg.min_length, - path_to_item=validation_metadata.path_to_item - ) - - if (cls._is_json_validation_enabled_oapg('pattern', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'regex')): - for regex_dict in cls.MetaOapg.regex: - flags = regex_dict.get('flags', 0) - if not re.search(regex_dict['pattern'], arg, flags=flags): - if flags != 0: - # Don't print the regex flags if the flags are not - # specified in the OAS document. 
- cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="must match regular expression", - constraint_value=regex_dict['pattern'], - path_to_item=validation_metadata.path_to_item, - additional_txt=" with flags=`{}`".format(flags) - ) - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="must match regular expression", - constraint_value=regex_dict['pattern'], - path_to_item=validation_metadata.path_to_item - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]: - """ - StrBase _validate_oapg - Validates that validations pass - """ - if isinstance(arg, str): - cls.__check_str_validations(arg, validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class UUIDBase: - @property - @functools.lru_cache() - def as_uuid_oapg(self) -> uuid.UUID: - return uuid.UUID(self) - - @classmethod - def __validate_format(cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata): - if isinstance(arg, str): - try: - uuid.UUID(arg) - return True - except ValueError: - raise ApiValueError( - "Invalid value '{}' for type UUID at {}".format(arg, validation_metadata.path_to_item) - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: typing.Optional[ValidationMetadata] = None, - ): - """ - UUIDBase _validate_oapg - """ - cls.__validate_format(arg, validation_metadata=validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class CustomIsoparser(isoparser): - - @_takes_ascii - def parse_isodatetime(self, dt_str): - components, pos = self._parse_isodate(dt_str) - if len(dt_str) > pos: - if self._sep is None or dt_str[pos:pos + 1] == self._sep: - components += self._parse_isotime(dt_str[pos + 1:]) - else: - 
raise ValueError('String contains unknown ISO components') - - if len(components) > 3 and components[3] == 24: - components[3] = 0 - return datetime(*components) + timedelta(days=1) - - if len(components) <= 3: - raise ValueError('Value is not a datetime') - - return datetime(*components) - - @_takes_ascii - def parse_isodate(self, datestr): - components, pos = self._parse_isodate(datestr) - - if len(datestr) > pos: - raise ValueError('String contains invalid time components') - - if len(components) > 3: - raise ValueError('String contains invalid time components') - - return date(*components) - - -DEFAULT_ISOPARSER = CustomIsoparser() - - -class DateBase: - @property - @functools.lru_cache() - def as_date_oapg(self) -> date: - return DEFAULT_ISOPARSER.parse_isodate(self) - - @classmethod - def __validate_format(cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata): - if isinstance(arg, str): - try: - DEFAULT_ISOPARSER.parse_isodate(arg) - return True - except ValueError: - raise ApiValueError( - "Value does not conform to the required ISO-8601 date format. " - "Invalid value '{}' for type date at {}".format(arg, validation_metadata.path_to_item) - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: typing.Optional[ValidationMetadata] = None, - ): - """ - DateBase _validate_oapg - """ - cls.__validate_format(arg, validation_metadata=validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class DateTimeBase: - @property - @functools.lru_cache() - def as_datetime_oapg(self) -> datetime: - return DEFAULT_ISOPARSER.parse_isodatetime(self) - - @classmethod - def __validate_format(cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata): - if isinstance(arg, str): - try: - DEFAULT_ISOPARSER.parse_isodatetime(arg) - return True - except ValueError: - raise ApiValueError( - "Value does not conform to the required ISO-8601 datetime format. 
" - "Invalid value '{}' for type datetime at {}".format(arg, validation_metadata.path_to_item) - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ): - """ - DateTimeBase _validate_oapg - """ - cls.__validate_format(arg, validation_metadata=validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class DecimalBase: - """ - A class for storing decimals that are sent over the wire as strings - These schemas must remain based on StrBase rather than NumberBase - because picking base classes must be deterministic - """ - - @property - @functools.lru_cache() - def as_decimal_oapg(self) -> decimal.Decimal: - return decimal.Decimal(self) - - @classmethod - def __validate_format(cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata): - if isinstance(arg, str): - try: - decimal.Decimal(arg) - return True - except decimal.InvalidOperation: - raise ApiValueError( - "Value cannot be converted to a decimal. 
" - "Invalid value '{}' for type decimal at {}".format(arg, validation_metadata.path_to_item) - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ): - """ - DecimalBase _validate_oapg - """ - cls.__validate_format(arg, validation_metadata=validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class NumberBase(ValidatorBase): - MetaOapg: MetaOapgTyped - - @property - def as_int_oapg(self) -> int: - try: - return self._as_int - except AttributeError: - """ - Note: for some numbers like 9.0 they could be represented as an - integer but our code chooses to store them as - >>> Decimal('9.0').as_tuple() - DecimalTuple(sign=0, digits=(9, 0), exponent=-1) - so we can tell that the value came from a float and convert it back to a float - during later serialization - """ - if self.as_tuple().exponent < 0: - # this could be represented as an integer but should be represented as a float - # because that's what it was serialized from - raise ApiValueError(f'{self} is not an integer') - self._as_int = int(self) - return self._as_int - - @property - def as_float_oapg(self) -> float: - try: - return self._as_float - except AttributeError: - if self.as_tuple().exponent >= 0: - raise ApiValueError(f'{self} is not a float') - self._as_float = float(self) - return self._as_float - - @classmethod - def __check_numeric_validations( - cls, - arg, - validation_metadata: ValidationMetadata - ): - if not hasattr(cls, 'MetaOapg'): - return - if cls._is_json_validation_enabled_oapg('multipleOf', - validation_metadata.configuration) and hasattr(cls.MetaOapg, 'multiple_of'): - multiple_of_value = cls.MetaOapg.multiple_of - if (not (float(arg) / multiple_of_value).is_integer()): - # Note 'multipleOf' will be as good as the floating point arithmetic. 
- cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="value must be a multiple of", - constraint_value=multiple_of_value, - path_to_item=validation_metadata.path_to_item - ) - - checking_max_or_min_values = any( - hasattr(cls.MetaOapg, validation_key) for validation_key in { - 'exclusive_maximum', - 'inclusive_maximum', - 'exclusive_minimum', - 'inclusive_minimum', - } - ) - if not checking_max_or_min_values: - return - - if (cls._is_json_validation_enabled_oapg('exclusiveMaximum', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'exclusive_maximum') and - arg >= cls.MetaOapg.exclusive_maximum): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="must be a value less than", - constraint_value=cls.MetaOapg.exclusive_maximum, - path_to_item=validation_metadata.path_to_item - ) - - if (cls._is_json_validation_enabled_oapg('maximum', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'inclusive_maximum') and - arg > cls.MetaOapg.inclusive_maximum): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="must be a value less than or equal to", - constraint_value=cls.MetaOapg.inclusive_maximum, - path_to_item=validation_metadata.path_to_item - ) - - if (cls._is_json_validation_enabled_oapg('exclusiveMinimum', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'exclusive_minimum') and - arg <= cls.MetaOapg.exclusive_minimum): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="must be a value greater than", - constraint_value=cls.MetaOapg.exclusive_maximum, - path_to_item=validation_metadata.path_to_item - ) - - if (cls._is_json_validation_enabled_oapg('minimum', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'inclusive_minimum') and - arg < cls.MetaOapg.inclusive_minimum): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="must be a value greater than or equal to", - 
constraint_value=cls.MetaOapg.inclusive_minimum, - path_to_item=validation_metadata.path_to_item - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]: - """ - NumberBase _validate_oapg - Validates that validations pass - """ - if isinstance(arg, decimal.Decimal): - cls.__check_numeric_validations(arg, validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class ListBase(ValidatorBase): - MetaOapg: MetaOapgTyped - - @classmethod - def __validate_items(cls, list_items, validation_metadata: ValidationMetadata): - """ - Ensures that: - - values passed in for items are valid - Exceptions will be raised if: - - invalid arguments were passed in - - Args: - list_items: the input list of items - - Raises: - ApiTypeError - for missing required arguments, or for invalid properties - """ - - # if we have definitions for an items schema, use it - # otherwise accept anything - item_cls = getattr(cls.MetaOapg, 'items', UnsetAnyTypeSchema) - item_cls = cls._get_class_oapg(item_cls) - path_to_schemas = {} - for i, value in enumerate(list_items): - item_validation_metadata = ValidationMetadata( - from_server=validation_metadata.from_server, - configuration=validation_metadata.configuration, - path_to_item=validation_metadata.path_to_item+(i,), - validated_path_to_schemas=validation_metadata.validated_path_to_schemas - ) - if item_validation_metadata.validation_ran_earlier(item_cls): - add_deeper_validated_schemas(item_validation_metadata, path_to_schemas) - continue - other_path_to_schemas = item_cls._validate_oapg( - value, validation_metadata=item_validation_metadata) - update(path_to_schemas, other_path_to_schemas) - return path_to_schemas - - @classmethod - def __check_tuple_validations( - cls, arg, - 
validation_metadata: ValidationMetadata): - if not hasattr(cls, 'MetaOapg'): - return - if (cls._is_json_validation_enabled_oapg('maxItems', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'max_items') and - len(arg) > cls.MetaOapg.max_items): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="number of items must be less than or equal to", - constraint_value=cls.MetaOapg.max_items, - path_to_item=validation_metadata.path_to_item - ) - - if (cls._is_json_validation_enabled_oapg('minItems', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'min_items') and - len(arg) < cls.MetaOapg.min_items): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="number of items must be greater than or equal to", - constraint_value=cls.MetaOapg.min_items, - path_to_item=validation_metadata.path_to_item - ) - - if (cls._is_json_validation_enabled_oapg('uniqueItems', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'unique_items') and cls.MetaOapg.unique_items and arg): - unique_items = set(arg) - if len(arg) > len(unique_items): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="duplicate items were found, and the tuple must not contain duplicates because", - constraint_value='unique_items==True', - path_to_item=validation_metadata.path_to_item - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ): - """ - ListBase _validate_oapg - We return dynamic classes of different bases depending upon the inputs - This makes it so: - - the returned instance is always a subclass of our defining schema - - this allows us to check type based on whether an instance is a subclass of a schema - - the returned instance is a serializable type (except for None, True, and False) which are enums - - Returns: - new_cls (type): the new class - - Raises: - ApiValueError: when a string can't be converted into a date or datetime and it must be one of 
those classes - ApiTypeError: when the input type is not in the list of allowed spec types - """ - if isinstance(arg, tuple): - cls.__check_tuple_validations(arg, validation_metadata) - _path_to_schemas = super()._validate_oapg(arg, validation_metadata=validation_metadata) - if not isinstance(arg, tuple): - return _path_to_schemas - updated_vm = ValidationMetadata( - configuration=validation_metadata.configuration, - from_server=validation_metadata.from_server, - path_to_item=validation_metadata.path_to_item, - seen_classes=validation_metadata.seen_classes | frozenset({cls}), - validated_path_to_schemas=validation_metadata.validated_path_to_schemas - ) - other_path_to_schemas = cls.__validate_items(arg, validation_metadata=updated_vm) - update(_path_to_schemas, other_path_to_schemas) - return _path_to_schemas - - @classmethod - def _get_items_oapg( - cls: 'Schema', - arg: typing.List[typing.Any], - path_to_item: typing.Tuple[typing.Union[str, int], ...], - path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']] - ): - ''' - ListBase _get_items_oapg - ''' - cast_items = [] - - for i, value in enumerate(arg): - item_path_to_item = path_to_item + (i,) - item_cls = path_to_schemas[item_path_to_item] - new_value = item_cls._get_new_instance_without_conversion_oapg( - value, - item_path_to_item, - path_to_schemas - ) - cast_items.append(new_value) - - return cast_items - - -class Discriminable: - MetaOapg: MetaOapgTyped - - @classmethod - def _ensure_discriminator_value_present_oapg(cls, disc_property_name: str, validation_metadata: ValidationMetadata, *args): - if not args or args and disc_property_name not in args[0]: - # The input data does not contain the discriminator property - raise ApiValueError( - "Cannot deserialize input data due to missing discriminator. 
" - "The discriminator property '{}' is missing at path: {}".format(disc_property_name, validation_metadata.path_to_item) - ) - - @classmethod - def get_discriminated_class_oapg(cls, disc_property_name: str, disc_payload_value: str): - """ - Used in schemas with discriminators - """ - if not hasattr(cls.MetaOapg, 'discriminator'): - return None - disc = cls.MetaOapg.discriminator() - if disc_property_name not in disc: - return None - discriminated_cls = disc[disc_property_name].get(disc_payload_value) - if discriminated_cls is not None: - return discriminated_cls - if not hasattr(cls, 'MetaOapg'): - return None - elif not ( - hasattr(cls.MetaOapg, 'all_of') or - hasattr(cls.MetaOapg, 'one_of') or - hasattr(cls.MetaOapg, 'any_of') - ): - return None - # TODO stop traveling if a cycle is hit - if hasattr(cls.MetaOapg, 'all_of'): - for allof_cls in cls.MetaOapg.all_of(): - discriminated_cls = allof_cls.get_discriminated_class_oapg( - disc_property_name=disc_property_name, disc_payload_value=disc_payload_value) - if discriminated_cls is not None: - return discriminated_cls - if hasattr(cls.MetaOapg, 'one_of'): - for oneof_cls in cls.MetaOapg.one_of(): - discriminated_cls = oneof_cls.get_discriminated_class_oapg( - disc_property_name=disc_property_name, disc_payload_value=disc_payload_value) - if discriminated_cls is not None: - return discriminated_cls - if hasattr(cls.MetaOapg, 'any_of'): - for anyof_cls in cls.MetaOapg.any_of(): - discriminated_cls = anyof_cls.get_discriminated_class_oapg( - disc_property_name=disc_property_name, disc_payload_value=disc_payload_value) - if discriminated_cls is not None: - return discriminated_cls - return None - - -class DictBase(Discriminable, ValidatorBase): - - @classmethod - def __validate_arg_presence(cls, arg): - """ - Ensures that: - - all required arguments are passed in - - the input variable names are valid - - present in properties or - - accepted because additionalProperties exists - Exceptions will be raised if: - - 
invalid arguments were passed in - - a var_name is invalid if additional_properties == NotAnyTypeSchema - and var_name not in properties.__annotations__ - - required properties were not passed in - - Args: - arg: the input dict - - Raises: - ApiTypeError - for missing required arguments, or for invalid properties - """ - seen_required_properties = set() - invalid_arguments = [] - required_property_names = getattr(cls.MetaOapg, 'required', set()) - additional_properties = getattr(cls.MetaOapg, 'additional_properties', UnsetAnyTypeSchema) - properties = getattr(cls.MetaOapg, 'properties', {}) - property_annotations = getattr(properties, '__annotations__', {}) - for property_name in arg: - if property_name in required_property_names: - seen_required_properties.add(property_name) - elif property_name in property_annotations: - continue - elif additional_properties is not NotAnyTypeSchema: - continue - else: - invalid_arguments.append(property_name) - missing_required_arguments = list(required_property_names - seen_required_properties) - if missing_required_arguments: - missing_required_arguments.sort() - raise ApiTypeError( - "{} is missing {} required argument{}: {}".format( - cls.__name__, - len(missing_required_arguments), - "s" if len(missing_required_arguments) > 1 else "", - missing_required_arguments - ) - ) - if invalid_arguments: - invalid_arguments.sort() - raise ApiTypeError( - "{} was passed {} invalid argument{}: {}".format( - cls.__name__, - len(invalid_arguments), - "s" if len(invalid_arguments) > 1 else "", - invalid_arguments - ) - ) - - @classmethod - def __validate_args(cls, arg, validation_metadata: ValidationMetadata): - """ - Ensures that: - - values passed in for properties are valid - Exceptions will be raised if: - - invalid arguments were passed in - - Args: - arg: the input dict - - Raises: - ApiTypeError - for missing required arguments, or for invalid properties - """ - path_to_schemas = {} - additional_properties = getattr(cls.MetaOapg, 
'additional_properties', UnsetAnyTypeSchema) - properties = getattr(cls.MetaOapg, 'properties', {}) - property_annotations = getattr(properties, '__annotations__', {}) - for property_name, value in arg.items(): - path_to_item = validation_metadata.path_to_item+(property_name,) - if property_name in property_annotations: - schema = property_annotations[property_name] - elif additional_properties is not NotAnyTypeSchema: - if additional_properties is UnsetAnyTypeSchema: - """ - If additionalProperties is unset and this path_to_item does not yet have - any validations on it, validate it. - If it already has validations on it, skip this validation. - """ - if path_to_item in path_to_schemas: - continue - schema = additional_properties - else: - raise ApiTypeError('Unable to find schema for value={} in class={} at path_to_item={}'.format( - value, cls, validation_metadata.path_to_item+(property_name,) - )) - schema = cls._get_class_oapg(schema) - arg_validation_metadata = ValidationMetadata( - from_server=validation_metadata.from_server, - configuration=validation_metadata.configuration, - path_to_item=path_to_item, - validated_path_to_schemas=validation_metadata.validated_path_to_schemas - ) - if arg_validation_metadata.validation_ran_earlier(schema): - add_deeper_validated_schemas(arg_validation_metadata, path_to_schemas) - continue - other_path_to_schemas = schema._validate_oapg(value, validation_metadata=arg_validation_metadata) - update(path_to_schemas, other_path_to_schemas) - return path_to_schemas - - @classmethod - def __check_dict_validations( - cls, - arg, - validation_metadata: ValidationMetadata - ): - if not hasattr(cls, 'MetaOapg'): - return - if (cls._is_json_validation_enabled_oapg('maxProperties', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'max_properties') and - len(arg) > cls.MetaOapg.max_properties): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="number of properties must be less than or equal to", - 
constraint_value=cls.MetaOapg.max_properties, - path_to_item=validation_metadata.path_to_item - ) - - if (cls._is_json_validation_enabled_oapg('minProperties', validation_metadata.configuration) and - hasattr(cls.MetaOapg, 'min_properties') and - len(arg) < cls.MetaOapg.min_properties): - cls._raise_validation_error_message_oapg( - value=arg, - constraint_msg="number of properties must be greater than or equal to", - constraint_value=cls.MetaOapg.min_properties, - path_to_item=validation_metadata.path_to_item - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ): - """ - DictBase _validate_oapg - We return dynamic classes of different bases depending upon the inputs - This makes it so: - - the returned instance is always a subclass of our defining schema - - this allows us to check type based on whether an instance is a subclass of a schema - - the returned instance is a serializable type (except for None, True, and False) which are enums - - Returns: - new_cls (type): the new class - - Raises: - ApiValueError: when a string can't be converted into a date or datetime and it must be one of those classes - ApiTypeError: when the input type is not in the list of allowed spec types - """ - if isinstance(arg, frozendict.frozendict): - cls.__check_dict_validations(arg, validation_metadata) - _path_to_schemas = super()._validate_oapg(arg, validation_metadata=validation_metadata) - if not isinstance(arg, frozendict.frozendict): - return _path_to_schemas - cls.__validate_arg_presence(arg) - other_path_to_schemas = cls.__validate_args(arg, validation_metadata=validation_metadata) - update(_path_to_schemas, other_path_to_schemas) - try: - discriminator = cls.MetaOapg.discriminator() - except AttributeError: - return _path_to_schemas - # discriminator exists - disc_prop_name = list(discriminator.keys())[0] - cls._ensure_discriminator_value_present_oapg(disc_prop_name, validation_metadata, arg) - discriminated_cls = 
cls.get_discriminated_class_oapg( - disc_property_name=disc_prop_name, disc_payload_value=arg[disc_prop_name]) - if discriminated_cls is None: - raise ApiValueError( - "Invalid discriminator value was passed in to {}.{} Only the values {} are allowed at {}".format( - cls.__name__, - disc_prop_name, - list(discriminator[disc_prop_name].keys()), - validation_metadata.path_to_item + (disc_prop_name,) - ) - ) - updated_vm = ValidationMetadata( - configuration=validation_metadata.configuration, - from_server=validation_metadata.from_server, - path_to_item=validation_metadata.path_to_item, - seen_classes=validation_metadata.seen_classes | frozenset({cls}), - validated_path_to_schemas=validation_metadata.validated_path_to_schemas - ) - if updated_vm.validation_ran_earlier(discriminated_cls): - add_deeper_validated_schemas(updated_vm, _path_to_schemas) - return _path_to_schemas - other_path_to_schemas = discriminated_cls._validate_oapg(arg, validation_metadata=updated_vm) - update(_path_to_schemas, other_path_to_schemas) - return _path_to_schemas - - @classmethod - def _get_properties_oapg( - cls, - arg: typing.Dict[str, typing.Any], - path_to_item: typing.Tuple[typing.Union[str, int], ...], - path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']] - ): - """ - DictBase _get_properties_oapg, this is how properties are set - These values already passed validation - """ - dict_items = {} - - for property_name_js, value in arg.items(): - property_path_to_item = path_to_item + (property_name_js,) - property_cls = path_to_schemas[property_path_to_item] - new_value = property_cls._get_new_instance_without_conversion_oapg( - value, - property_path_to_item, - path_to_schemas - ) - dict_items[property_name_js] = new_value - - return dict_items - - def __setattr__(self, name: str, value: typing.Any): - if not isinstance(self, FileIO): - raise AttributeError('property setting not supported on immutable instances') - - def __getattr__(self, name: 
str): - """ - for instance.name access - Properties are only type hinted for required properties - so that hasattr(instance, 'optionalProp') is False when that key is not present - """ - if not isinstance(self, frozendict.frozendict): - return super().__getattr__(name) - if name not in self.__class__.__annotations__: - raise AttributeError(f"{self} has no attribute '{name}'") - try: - value = self[name] - return value - except KeyError as ex: - raise AttributeError(str(ex)) - - def __getitem__(self, name: str): - """ - dict_instance[name] accessor - key errors thrown - """ - if not isinstance(self, frozendict.frozendict): - return super().__getattr__(name) - return super().__getitem__(name) - - def get_item_oapg(self, name: str) -> typing.Union['AnyTypeSchema', Unset]: - # dict_instance[name] accessor - if not isinstance(self, frozendict.frozendict): - raise NotImplementedError() - try: - return super().__getitem__(name) - except KeyError: - return unset - - -def cast_to_allowed_types( - arg: typing.Union[str, date, datetime, uuid.UUID, decimal.Decimal, int, float, None, dict, frozendict.frozendict, list, tuple, bytes, Schema, io.FileIO, io.BufferedReader], - from_server: bool, - validated_path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]], - path_to_item: typing.Tuple[typing.Union[str, int], ...] 
= tuple(['args[0]']), -) -> typing.Union[frozendict.frozendict, tuple, decimal.Decimal, str, bytes, BoolClass, NoneClass, FileIO]: - """ - Casts the input payload arg into the allowed types - The input validated_path_to_schemas is mutated by running this function - - When from_server is False then - - date/datetime is cast to str - - int/float is cast to Decimal - - If a Schema instance is passed in it is converted back to a primitive instance because - One may need to validate that data to the original Schema class AND additional different classes - those additional classes will need to be added to the new manufactured class for that payload - If the code didn't do this and kept the payload as a Schema instance it would fail to validate to other - Schema classes and the code wouldn't be able to mfg a new class that includes all valid schemas - TODO: store the validated schema classes in validation_metadata - - Args: - arg: the payload - from_server: whether this payload came from the server or not - validated_path_to_schemas: a dict that stores the validated classes at any path location in the payload - """ - if isinstance(arg, Schema): - # store the already run validations - schema_classes = set() - for cls in arg.__class__.__bases__: - if cls is Singleton: - # Skip Singleton - continue - schema_classes.add(cls) - validated_path_to_schemas[path_to_item] = schema_classes - - type_error = ApiTypeError(f"Invalid type. 
Required value type is str and passed type was {type(arg)} at {path_to_item}") - if isinstance(arg, str): - return str(arg) - elif isinstance(arg, (dict, frozendict.frozendict)): - return frozendict.frozendict({key: cast_to_allowed_types(val, from_server, validated_path_to_schemas, path_to_item + (key,)) for key, val in arg.items()}) - elif isinstance(arg, (bool, BoolClass)): - """ - this check must come before isinstance(arg, (int, float)) - because isinstance(True, int) is True - """ - if arg: - return BoolClass.TRUE - return BoolClass.FALSE - elif isinstance(arg, int): - return decimal.Decimal(arg) - elif isinstance(arg, float): - decimal_from_float = decimal.Decimal(arg) - if decimal_from_float.as_integer_ratio()[1] == 1: - # 9.0 -> Decimal('9.0') - # 3.4028234663852886e+38 -> Decimal('340282346638528859811704183484516925440.0') - return decimal.Decimal(str(decimal_from_float)+'.0') - return decimal_from_float - elif isinstance(arg, (tuple, list)): - return tuple([cast_to_allowed_types(item, from_server, validated_path_to_schemas, path_to_item + (i,)) for i, item in enumerate(arg)]) - elif isinstance(arg, (none_type, NoneClass)): - return NoneClass.NONE - elif isinstance(arg, (date, datetime)): - if not from_server: - return arg.isoformat() - raise type_error - elif isinstance(arg, uuid.UUID): - if not from_server: - return str(arg) - raise type_error - elif isinstance(arg, decimal.Decimal): - return decimal.Decimal(arg) - elif isinstance(arg, bytes): - return bytes(arg) - elif isinstance(arg, (io.FileIO, io.BufferedReader)): - return FileIO(arg) - raise ValueError('Invalid type passed in got input={} type={}'.format(arg, type(arg))) - - -class ComposedBase(Discriminable): - - @classmethod - def __get_allof_classes(cls, arg, validation_metadata: ValidationMetadata): - path_to_schemas = defaultdict(set) - for allof_cls in cls.MetaOapg.all_of(): - if validation_metadata.validation_ran_earlier(allof_cls): - add_deeper_validated_schemas(validation_metadata, 
path_to_schemas) - continue - other_path_to_schemas = allof_cls._validate_oapg(arg, validation_metadata=validation_metadata) - update(path_to_schemas, other_path_to_schemas) - return path_to_schemas - - @classmethod - def __get_oneof_class( - cls, - arg, - discriminated_cls, - validation_metadata: ValidationMetadata, - ): - oneof_classes = [] - path_to_schemas = defaultdict(set) - for oneof_cls in cls.MetaOapg.one_of(): - if oneof_cls in path_to_schemas[validation_metadata.path_to_item]: - oneof_classes.append(oneof_cls) - continue - if validation_metadata.validation_ran_earlier(oneof_cls): - oneof_classes.append(oneof_cls) - add_deeper_validated_schemas(validation_metadata, path_to_schemas) - continue - try: - path_to_schemas = oneof_cls._validate_oapg(arg, validation_metadata=validation_metadata) - except (ApiValueError, ApiTypeError) as ex: - if discriminated_cls is not None and oneof_cls is discriminated_cls: - raise ex - continue - oneof_classes.append(oneof_cls) - if not oneof_classes: - raise ApiValueError( - "Invalid inputs given to generate an instance of {}. None " - "of the oneOf schemas matched the input data.".format(cls) - ) - elif len(oneof_classes) > 1: - raise ApiValueError( - "Invalid inputs given to generate an instance of {}. 
Multiple " - "oneOf schemas {} matched the inputs, but a max of one is allowed.".format(cls, oneof_classes) - ) - # exactly one class matches - return path_to_schemas - - @classmethod - def __get_anyof_classes( - cls, - arg, - discriminated_cls, - validation_metadata: ValidationMetadata - ): - anyof_classes = [] - path_to_schemas = defaultdict(set) - for anyof_cls in cls.MetaOapg.any_of(): - if validation_metadata.validation_ran_earlier(anyof_cls): - anyof_classes.append(anyof_cls) - add_deeper_validated_schemas(validation_metadata, path_to_schemas) - continue - - try: - other_path_to_schemas = anyof_cls._validate_oapg(arg, validation_metadata=validation_metadata) - except (ApiValueError, ApiTypeError) as ex: - if discriminated_cls is not None and anyof_cls is discriminated_cls: - raise ex - continue - anyof_classes.append(anyof_cls) - update(path_to_schemas, other_path_to_schemas) - if not anyof_classes: - raise ApiValueError( - "Invalid inputs given to generate an instance of {}. None " - "of the anyOf schemas matched the input data.".format(cls) - ) - return path_to_schemas - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]: - """ - ComposedBase _validate_oapg - We return dynamic classes of different bases depending upon the inputs - This makes it so: - - the returned instance is always a subclass of our defining schema - - this allows us to check type based on whether an instance is a subclass of a schema - - the returned instance is a serializable type (except for None, True, and False) which are enums - - Returns: - new_cls (type): the new class - - Raises: - ApiValueError: when a string can't be converted into a date or datetime and it must be one of those classes - ApiTypeError: when the input type is not in the list of allowed spec types - """ 
- # validation checking on types, validations, and enums - path_to_schemas = super()._validate_oapg(arg, validation_metadata=validation_metadata) - - updated_vm = ValidationMetadata( - configuration=validation_metadata.configuration, - from_server=validation_metadata.from_server, - path_to_item=validation_metadata.path_to_item, - seen_classes=validation_metadata.seen_classes | frozenset({cls}), - validated_path_to_schemas=validation_metadata.validated_path_to_schemas - ) - - # process composed schema - discriminator = None - if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'discriminator'): - discriminator = cls.MetaOapg.discriminator() - discriminated_cls = None - if discriminator and arg and isinstance(arg, frozendict.frozendict): - disc_property_name = list(discriminator.keys())[0] - cls._ensure_discriminator_value_present_oapg(disc_property_name, updated_vm, arg) - # get discriminated_cls by looking at the dict in the current class - discriminated_cls = cls.get_discriminated_class_oapg( - disc_property_name=disc_property_name, disc_payload_value=arg[disc_property_name]) - if discriminated_cls is None: - raise ApiValueError( - "Invalid discriminator value '{}' was passed in to {}.{} Only the values {} are allowed at {}".format( - arg[disc_property_name], - cls.__name__, - disc_property_name, - list(discriminator[disc_property_name].keys()), - updated_vm.path_to_item + (disc_property_name,) - ) - ) - - if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'all_of'): - other_path_to_schemas = cls.__get_allof_classes(arg, validation_metadata=updated_vm) - update(path_to_schemas, other_path_to_schemas) - if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'one_of'): - other_path_to_schemas = cls.__get_oneof_class( - arg, - discriminated_cls=discriminated_cls, - validation_metadata=updated_vm - ) - update(path_to_schemas, other_path_to_schemas) - if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'any_of'): - other_path_to_schemas = cls.__get_anyof_classes( 
- arg, - discriminated_cls=discriminated_cls, - validation_metadata=updated_vm - ) - update(path_to_schemas, other_path_to_schemas) - not_cls = None - if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'not_schema'): - not_cls = cls.MetaOapg.not_schema - not_cls = cls._get_class_oapg(not_cls) - if not_cls: - other_path_to_schemas = None - not_exception = ApiValueError( - "Invalid value '{}' was passed in to {}. Value is invalid because it is disallowed by {}".format( - arg, - cls.__name__, - not_cls.__name__, - ) - ) - if updated_vm.validation_ran_earlier(not_cls): - raise not_exception - - try: - other_path_to_schemas = not_cls._validate_oapg(arg, validation_metadata=updated_vm) - except (ApiValueError, ApiTypeError): - pass - if other_path_to_schemas: - raise not_exception - - if discriminated_cls is not None and not updated_vm.validation_ran_earlier(discriminated_cls): - # TODO use an exception from this package here - add_deeper_validated_schemas(updated_vm, path_to_schemas) - assert discriminated_cls in path_to_schemas[updated_vm.path_to_item] - return path_to_schemas - - -# DictBase, ListBase, NumberBase, StrBase, BoolBase, NoneBase -class ComposedSchema( - ComposedBase, - DictBase, - ListBase, - NumberBase, - StrBase, - BoolBase, - NoneBase, - Schema, - NoneFrozenDictTupleStrDecimalBoolMixin -): - @classmethod - def from_openapi_data_oapg(cls, *args: typing.Any, _configuration: typing.Optional[Configuration] = None, **kwargs): - if not args: - if not kwargs: - raise ApiTypeError('{} is missing required input data in args or kwargs'.format(cls.__name__)) - args = (kwargs, ) - return super().from_openapi_data_oapg(args[0], _configuration=_configuration) - - -class ListSchema( - ListBase, - Schema, - TupleMixin -): - - @classmethod - def from_openapi_data_oapg(cls, arg: typing.List[typing.Any], _configuration: typing.Optional[Configuration] = None): - return super().from_openapi_data_oapg(arg, _configuration=_configuration) - - def __new__(cls, _arg: 
typing.Union[typing.List[typing.Any], typing.Tuple[typing.Any]], **kwargs: Configuration): - return super().__new__(cls, _arg, **kwargs) - - -class NoneSchema( - NoneBase, - Schema, - NoneMixin -): - - @classmethod - def from_openapi_data_oapg(cls, arg: None, _configuration: typing.Optional[Configuration] = None): - return super().from_openapi_data_oapg(arg, _configuration=_configuration) - - def __new__(cls, _arg: None, **kwargs: Configuration): - return super().__new__(cls, _arg, **kwargs) - - -class NumberSchema( - NumberBase, - Schema, - DecimalMixin -): - """ - This is used for type: number with no format - Both integers AND floats are accepted - """ - - @classmethod - def from_openapi_data_oapg(cls, arg: typing.Union[int, float], _configuration: typing.Optional[Configuration] = None): - return super().from_openapi_data_oapg(arg, _configuration=_configuration) - - def __new__(cls, _arg: typing.Union[decimal.Decimal, int, float], **kwargs: Configuration): - return super().__new__(cls, _arg, **kwargs) - - -class IntBase: - @property - def as_int_oapg(self) -> int: - try: - return self._as_int - except AttributeError: - self._as_int = int(self) - return self._as_int - - @classmethod - def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata): - if isinstance(arg, decimal.Decimal): - - denominator = arg.as_integer_ratio()[-1] - if denominator != 1: - raise ApiValueError( - "Invalid value '{}' for type integer at {}".format(arg, validation_metadata.path_to_item) - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ): - """ - IntBase _validate_oapg - TODO what about types = (int, number) -> IntBase, NumberBase? 
We could drop int and keep number only - """ - cls.__validate_format(arg, validation_metadata=validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class IntSchema(IntBase, NumberSchema): - - @classmethod - def from_openapi_data_oapg(cls, arg: int, _configuration: typing.Optional[Configuration] = None): - return super().from_openapi_data_oapg(arg, _configuration=_configuration) - - def __new__(cls, _arg: typing.Union[decimal.Decimal, int], **kwargs: Configuration): - return super().__new__(cls, _arg, **kwargs) - - -class Int32Base: - __inclusive_minimum = decimal.Decimal(-2147483648) - __inclusive_maximum = decimal.Decimal(2147483647) - - @classmethod - def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata): - if isinstance(arg, decimal.Decimal) and arg.as_tuple().exponent == 0: - if not cls.__inclusive_minimum <= arg <= cls.__inclusive_maximum: - raise ApiValueError( - "Invalid value '{}' for type int32 at {}".format(arg, validation_metadata.path_to_item) - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ): - """ - Int32Base _validate_oapg - """ - cls.__validate_format(arg, validation_metadata=validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class Int32Schema( - Int32Base, - IntSchema -): - pass - - -class Int64Base: - __inclusive_minimum = decimal.Decimal(-9223372036854775808) - __inclusive_maximum = decimal.Decimal(9223372036854775807) - - @classmethod - def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata): - if isinstance(arg, decimal.Decimal) and arg.as_tuple().exponent == 0: - if not cls.__inclusive_minimum <= arg <= cls.__inclusive_maximum: - raise ApiValueError( - "Invalid value '{}' for type int64 at {}".format(arg, validation_metadata.path_to_item) - ) - - @classmethod - def _validate_oapg( - cls, 
- arg, - validation_metadata: ValidationMetadata, - ): - """ - Int64Base _validate_oapg - """ - cls.__validate_format(arg, validation_metadata=validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class Int64Schema( - Int64Base, - IntSchema -): - pass - - -class Float32Base: - __inclusive_minimum = decimal.Decimal(-3.4028234663852886e+38) - __inclusive_maximum = decimal.Decimal(3.4028234663852886e+38) - - @classmethod - def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata): - if isinstance(arg, decimal.Decimal): - if not cls.__inclusive_minimum <= arg <= cls.__inclusive_maximum: - raise ApiValueError( - "Invalid value '{}' for type float at {}".format(arg, validation_metadata.path_to_item) - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ): - """ - Float32Base _validate_oapg - """ - cls.__validate_format(arg, validation_metadata=validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - - -class Float32Schema( - Float32Base, - NumberSchema -): - - @classmethod - def from_openapi_data_oapg(cls, arg: float, _configuration: typing.Optional[Configuration] = None): - return super().from_openapi_data_oapg(arg, _configuration=_configuration) - - -class Float64Base: - __inclusive_minimum = decimal.Decimal(-1.7976931348623157E+308) - __inclusive_maximum = decimal.Decimal(1.7976931348623157E+308) - - @classmethod - def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata): - if isinstance(arg, decimal.Decimal): - if not cls.__inclusive_minimum <= arg <= cls.__inclusive_maximum: - raise ApiValueError( - "Invalid value '{}' for type double at {}".format(arg, validation_metadata.path_to_item) - ) - - @classmethod - def _validate_oapg( - cls, - arg, - validation_metadata: ValidationMetadata, - ): - """ - Float64Base _validate_oapg - """ 
- cls.__validate_format(arg, validation_metadata=validation_metadata) - return super()._validate_oapg(arg, validation_metadata=validation_metadata) - -class Float64Schema( - Float64Base, - NumberSchema -): - - @classmethod - def from_openapi_data_oapg(cls, arg: float, _configuration: typing.Optional[Configuration] = None): - # todo check format - return super().from_openapi_data_oapg(arg, _configuration=_configuration) - - -class StrSchema( - StrBase, - Schema, - StrMixin -): - """ - date + datetime string types must inherit from this class - That is because one can validate a str payload as both: - - type: string (format unset) - - type: string, format: date - """ - - @classmethod - def from_openapi_data_oapg(cls, arg: str, _configuration: typing.Optional[Configuration] = None) -> 'StrSchema': - return super().from_openapi_data_oapg(arg, _configuration=_configuration) - - def __new__(cls, _arg: typing.Union[str, date, datetime, uuid.UUID], **kwargs: Configuration): - return super().__new__(cls, _arg, **kwargs) - - -class UUIDSchema(UUIDBase, StrSchema): - - def __new__(cls, _arg: typing.Union[str, uuid.UUID], **kwargs: Configuration): - return super().__new__(cls, _arg, **kwargs) - - -class DateSchema(DateBase, StrSchema): - - def __new__(cls, _arg: typing.Union[str, date], **kwargs: Configuration): - return super().__new__(cls, _arg, **kwargs) - - -class DateTimeSchema(DateTimeBase, StrSchema): - - def __new__(cls, _arg: typing.Union[str, datetime], **kwargs: Configuration): - return super().__new__(cls, _arg, **kwargs) - - -class DecimalSchema(DecimalBase, StrSchema): - - def __new__(cls, _arg: str, **kwargs: Configuration): - """ - Note: Decimals may not be passed in because cast_to_allowed_types is only invoked once for payloads - which can be simple (str) or complex (dicts or lists with nested values) - Because casting is only done once and recursively casts all values prior to validation then for a potential - client side Decimal input if Decimal was 
accepted as an input in DecimalSchema then one would not know - if one was using it for a StrSchema (where it should be cast to str) or one is using it for NumberSchema - where it should stay as Decimal. - """ - return super().__new__(cls, _arg, **kwargs) - - -class BytesSchema( - Schema, - BytesMixin -): - """ - this class will subclass bytes and is immutable - """ - def __new__(cls, _arg: bytes, **kwargs: Configuration): - return super(Schema, cls).__new__(cls, _arg) - - -class FileSchema( - Schema, - FileMixin -): - """ - This class is NOT immutable - Dynamic classes are built using it for example when AnyType allows in binary data - Al other schema classes ARE immutable - If one wanted to make this immutable one could make this a DictSchema with required properties: - - data = BytesSchema (which would be an immutable bytes based schema) - - file_name = StrSchema - and cast_to_allowed_types would convert bytes and file instances into dicts containing data + file_name - The downside would be that data would be stored in memory which one may not want to do for very large files - - The developer is responsible for closing this file and deleting it - - This class was kept as mutable: - - to allow file reading and writing to disk - - to be able to preserve file name info - """ - - def __new__(cls, _arg: typing.Union[io.FileIO, io.BufferedReader], **kwargs: Configuration): - return super(Schema, cls).__new__(cls, _arg) - - -class BinaryBase: - pass - - -class BinarySchema( - ComposedBase, - BinaryBase, - Schema, - BinaryMixin -): - class MetaOapg: - @staticmethod - def one_of(): - return [ - BytesSchema, - FileSchema, - ] - - def __new__(cls, _arg: typing.Union[io.FileIO, io.BufferedReader, bytes], **kwargs: Configuration): - return super().__new__(cls, _arg) - - -class BoolSchema( - BoolBase, - Schema, - BoolMixin -): - - @classmethod - def from_openapi_data_oapg(cls, arg: bool, _configuration: typing.Optional[Configuration] = None): - return 
super().from_openapi_data_oapg(arg, _configuration=_configuration) - - def __new__(cls, _arg: bool, **kwargs: ValidationMetadata): - return super().__new__(cls, _arg, **kwargs) - - -class AnyTypeSchema( - DictBase, - ListBase, - NumberBase, - StrBase, - BoolBase, - NoneBase, - Schema, - NoneFrozenDictTupleStrDecimalBoolFileBytesMixin -): - # Python representation of a schema defined as true or {} - pass - - -class UnsetAnyTypeSchema(AnyTypeSchema): - # Used when additionalProperties/items was not explicitly defined and a defining schema is needed - pass - - -class NotAnyTypeSchema( - ComposedSchema, -): - """ - Python representation of a schema defined as false or {'not': {}} - Does not allow inputs in of AnyType - Note: validations on this class are never run because the code knows that no inputs will ever validate - """ - - class MetaOapg: - not_schema = AnyTypeSchema - - def __new__( - cls, - *_args, - _configuration: typing.Optional[Configuration] = None, - ) -> 'NotAnyTypeSchema': - return super().__new__( - cls, - *_args, - _configuration=_configuration, - ) - - -class DictSchema( - DictBase, - Schema, - FrozenDictMixin -): - @classmethod - def from_openapi_data_oapg(cls, arg: typing.Dict[str, typing.Any], _configuration: typing.Optional[Configuration] = None): - return super().from_openapi_data_oapg(arg, _configuration=_configuration) - - def __new__(cls, *_args: typing.Union[dict, frozendict.frozendict], **kwargs: typing.Union[dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, bytes, Schema, Unset, ValidationMetadata]): - return super().__new__(cls, *_args, **kwargs) - - -schema_type_classes = {NoneSchema, DictSchema, ListSchema, NumberSchema, StrSchema, BoolSchema, AnyTypeSchema} - - -@functools.lru_cache() -def get_new_class( - class_name: str, - bases: typing.Tuple[typing.Type[typing.Union[Schema, typing.Any]], ...] 
-) -> typing.Type[Schema]: - """ - Returns a new class that is made with the subclass bases - """ - new_cls: typing.Type[Schema] = type(class_name, bases, {}) - return new_cls - - -LOG_CACHE_USAGE = False - - -def log_cache_usage(cache_fn): - if LOG_CACHE_USAGE: - print(cache_fn.__name__, cache_fn.cache_info()) \ No newline at end of file diff --git a/launch/api_client/test/__init__.py b/launch/api_client/test/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/launch/api_client/test/test_models/__init__.py b/launch/api_client/test/test_models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/launch/api_client/test/test_models/test_annotation.py b/launch/api_client/test/test_models/test_annotation.py deleted file mode 100644 index 16171d1c..00000000 --- a/launch/api_client/test/test_models/test_annotation.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.annotation import Annotation - - -class TestAnnotation(unittest.TestCase): - """Annotation unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_audio.py b/launch/api_client/test/test_models/test_audio.py deleted file mode 100644 index 48f48bec..00000000 --- a/launch/api_client/test/test_models/test_audio.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - 
-import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.audio import Audio - - -class TestAudio(unittest.TestCase): - """Audio unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_audio1.py b/launch/api_client/test/test_models/test_audio1.py deleted file mode 100644 index 1f854bd7..00000000 --- a/launch/api_client/test/test_models/test_audio1.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.audio1 import Audio1 - - -class TestAudio1(unittest.TestCase): - """Audio1 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_audio2.py b/launch/api_client/test/test_models/test_audio2.py deleted file mode 100644 index b2f3bfc0..00000000 --- a/launch/api_client/test/test_models/test_audio2.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.audio2 import Audio2 - - -class TestAudio2(unittest.TestCase): - """Audio2 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git 
a/launch/api_client/test/test_models/test_batch_completions_job.py b/launch/api_client/test/test_models/test_batch_completions_job.py deleted file mode 100644 index c260fbb7..00000000 --- a/launch/api_client/test/test_models/test_batch_completions_job.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.batch_completions_job import BatchCompletionsJob - - -class TestBatchCompletionsJob(unittest.TestCase): - """BatchCompletionsJob unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_batch_completions_job_status.py b/launch/api_client/test/test_models/test_batch_completions_job_status.py deleted file mode 100644 index fd6a44e0..00000000 --- a/launch/api_client/test/test_models/test_batch_completions_job_status.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.batch_completions_job_status import ( - BatchCompletionsJobStatus, -) - - -class TestBatchCompletionsJobStatus(unittest.TestCase): - """BatchCompletionsJobStatus unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_batch_completions_model_config.py 
b/launch/api_client/test/test_models/test_batch_completions_model_config.py deleted file mode 100644 index 6760fe00..00000000 --- a/launch/api_client/test/test_models/test_batch_completions_model_config.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.batch_completions_model_config import ( - BatchCompletionsModelConfig, -) - - -class TestBatchCompletionsModelConfig(unittest.TestCase): - """BatchCompletionsModelConfig unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_batch_job_serialization_format.py b/launch/api_client/test/test_models/test_batch_job_serialization_format.py deleted file mode 100644 index 8aee9056..00000000 --- a/launch/api_client/test/test_models/test_batch_job_serialization_format.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.batch_job_serialization_format import ( - BatchJobSerializationFormat, -) - - -class TestBatchJobSerializationFormat(unittest.TestCase): - """BatchJobSerializationFormat unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_batch_job_status.py 
b/launch/api_client/test/test_models/test_batch_job_status.py deleted file mode 100644 index 9df0489e..00000000 --- a/launch/api_client/test/test_models/test_batch_job_status.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.batch_job_status import BatchJobStatus - - -class TestBatchJobStatus(unittest.TestCase): - """BatchJobStatus unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_callback_auth.py b/launch/api_client/test/test_models/test_callback_auth.py deleted file mode 100644 index 2db08713..00000000 --- a/launch/api_client/test/test_models/test_callback_auth.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.callback_auth import CallbackAuth - - -class TestCallbackAuth(unittest.TestCase): - """CallbackAuth unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_callback_basic_auth.py b/launch/api_client/test/test_models/test_callback_basic_auth.py deleted file mode 100644 index f4cd9c54..00000000 --- a/launch/api_client/test/test_models/test_callback_basic_auth.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - 
-""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.callback_basic_auth import CallbackBasicAuth - - -class TestCallbackBasicAuth(unittest.TestCase): - """CallbackBasicAuth unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_callbackm_tls_auth.py b/launch/api_client/test/test_models/test_callbackm_tls_auth.py deleted file mode 100644 index 90218ce5..00000000 --- a/launch/api_client/test/test_models/test_callbackm_tls_auth.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.callbackm_tls_auth import CallbackmTLSAuth - - -class TestCallbackmTLSAuth(unittest.TestCase): - """CallbackmTLSAuth unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_cancel_batch_completions_v2_response.py b/launch/api_client/test/test_models/test_cancel_batch_completions_v2_response.py deleted file mode 100644 index f40b55b8..00000000 --- a/launch/api_client/test/test_models/test_cancel_batch_completions_v2_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: 
E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.cancel_batch_completions_v2_response import ( - CancelBatchCompletionsV2Response, -) - - -class TestCancelBatchCompletionsV2Response(unittest.TestCase): - """CancelBatchCompletionsV2Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_cancel_fine_tune_response.py b/launch/api_client/test/test_models/test_cancel_fine_tune_response.py deleted file mode 100644 index 581efacb..00000000 --- a/launch/api_client/test/test_models/test_cancel_fine_tune_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.cancel_fine_tune_response import ( - CancelFineTuneResponse, -) - - -class TestCancelFineTuneResponse(unittest.TestCase): - """CancelFineTuneResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_function_call_option.py b/launch/api_client/test/test_models/test_chat_completion_function_call_option.py deleted file mode 100644 index f9d6e187..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_function_call_option.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - 
The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_function_call_option import ( - ChatCompletionFunctionCallOption, -) - - -class TestChatCompletionFunctionCallOption(unittest.TestCase): - """ChatCompletionFunctionCallOption unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_functions.py b/launch/api_client/test/test_models/test_chat_completion_functions.py deleted file mode 100644 index ff47359b..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_functions.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_functions import ( - ChatCompletionFunctions, -) - - -class TestChatCompletionFunctions(unittest.TestCase): - """ChatCompletionFunctions unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_message_tool_call.py b/launch/api_client/test/test_models/test_chat_completion_message_tool_call.py deleted file mode 100644 index 2aefecbf..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_message_tool_call.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of 
the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_message_tool_call import ( - ChatCompletionMessageToolCall, -) - - -class TestChatCompletionMessageToolCall(unittest.TestCase): - """ChatCompletionMessageToolCall unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_message_tool_call_chunk.py b/launch/api_client/test/test_models/test_chat_completion_message_tool_call_chunk.py deleted file mode 100644 index f0e15c18..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_message_tool_call_chunk.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_message_tool_call_chunk import ( - ChatCompletionMessageToolCallChunk, -) - - -class TestChatCompletionMessageToolCallChunk(unittest.TestCase): - """ChatCompletionMessageToolCallChunk unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_input.py b/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_input.py deleted file mode 100644 index 0ccacadd..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_input.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_message_tool_calls_input import ( - ChatCompletionMessageToolCallsInput, -) - - -class TestChatCompletionMessageToolCallsInput(unittest.TestCase): - """ChatCompletionMessageToolCallsInput unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_output.py b/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_output.py deleted file mode 100644 index 98544a22..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_output.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_message_tool_calls_output import ( - ChatCompletionMessageToolCallsOutput, -) - - -class TestChatCompletionMessageToolCallsOutput(unittest.TestCase): - """ChatCompletionMessageToolCallsOutput unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_named_tool_choice.py b/launch/api_client/test/test_models/test_chat_completion_named_tool_choice.py deleted file mode 100644 index b8472a2b..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_named_tool_choice.py +++ /dev/null @@ -1,28 
+0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_named_tool_choice import ( - ChatCompletionNamedToolChoice, -) - - -class TestChatCompletionNamedToolChoice(unittest.TestCase): - """ChatCompletionNamedToolChoice unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_assistant_message.py b/launch/api_client/test/test_models/test_chat_completion_request_assistant_message.py deleted file mode 100644 index 1451e65d..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_assistant_message.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_assistant_message import ( - ChatCompletionRequestAssistantMessage, -) - - -class TestChatCompletionRequestAssistantMessage(unittest.TestCase): - """ChatCompletionRequestAssistantMessage unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_assistant_message_content_part.py b/launch/api_client/test/test_models/test_chat_completion_request_assistant_message_content_part.py deleted file mode 100644 index 
d2aaf7c5..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_assistant_message_content_part.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_assistant_message_content_part import ( - ChatCompletionRequestAssistantMessageContentPart, -) - - -class TestChatCompletionRequestAssistantMessageContentPart(unittest.TestCase): - """ChatCompletionRequestAssistantMessageContentPart unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_developer_message.py b/launch/api_client/test/test_models/test_chat_completion_request_developer_message.py deleted file mode 100644 index c39bee08..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_developer_message.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_developer_message import ( - ChatCompletionRequestDeveloperMessage, -) - - -class TestChatCompletionRequestDeveloperMessage(unittest.TestCase): - """ChatCompletionRequestDeveloperMessage unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git 
a/launch/api_client/test/test_models/test_chat_completion_request_function_message.py b/launch/api_client/test/test_models/test_chat_completion_request_function_message.py deleted file mode 100644 index ce552c33..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_function_message.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_function_message import ( - ChatCompletionRequestFunctionMessage, -) - - -class TestChatCompletionRequestFunctionMessage(unittest.TestCase): - """ChatCompletionRequestFunctionMessage unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message.py b/launch/api_client/test/test_models/test_chat_completion_request_message.py deleted file mode 100644 index 5d5f9657..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_message.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_message import ( - ChatCompletionRequestMessage, -) - - -class TestChatCompletionRequestMessage(unittest.TestCase): - """ChatCompletionRequestMessage unit test stubs""" - - _configuration = configuration.Configuration() - 
- -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_audio.py b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_audio.py deleted file mode 100644 index 94c867f4..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_audio.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_message_content_part_audio import ( - ChatCompletionRequestMessageContentPartAudio, -) - - -class TestChatCompletionRequestMessageContentPartAudio(unittest.TestCase): - """ChatCompletionRequestMessageContentPartAudio unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_file.py b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_file.py deleted file mode 100644 index f41daf86..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_file.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_message_content_part_file import ( - 
ChatCompletionRequestMessageContentPartFile, -) - - -class TestChatCompletionRequestMessageContentPartFile(unittest.TestCase): - """ChatCompletionRequestMessageContentPartFile unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_image.py b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_image.py deleted file mode 100644 index eec5e97b..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_image.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_message_content_part_image import ( - ChatCompletionRequestMessageContentPartImage, -) - - -class TestChatCompletionRequestMessageContentPartImage(unittest.TestCase): - """ChatCompletionRequestMessageContentPartImage unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_refusal.py b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_refusal.py deleted file mode 100644 index 6f3a05ac..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_refusal.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI 
document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_message_content_part_refusal import ( - ChatCompletionRequestMessageContentPartRefusal, -) - - -class TestChatCompletionRequestMessageContentPartRefusal(unittest.TestCase): - """ChatCompletionRequestMessageContentPartRefusal unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_text.py b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_text.py deleted file mode 100644 index 2e527875..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_text.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_message_content_part_text import ( - ChatCompletionRequestMessageContentPartText, -) - - -class TestChatCompletionRequestMessageContentPartText(unittest.TestCase): - """ChatCompletionRequestMessageContentPartText unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_system_message.py b/launch/api_client/test/test_models/test_chat_completion_request_system_message.py deleted file mode 100644 index d6537bd6..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_system_message.py +++ 
/dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_system_message import ( - ChatCompletionRequestSystemMessage, -) - - -class TestChatCompletionRequestSystemMessage(unittest.TestCase): - """ChatCompletionRequestSystemMessage unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_system_message_content_part.py b/launch/api_client/test/test_models/test_chat_completion_request_system_message_content_part.py deleted file mode 100644 index 549c3f7b..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_system_message_content_part.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_system_message_content_part import ( - ChatCompletionRequestSystemMessageContentPart, -) - - -class TestChatCompletionRequestSystemMessageContentPart(unittest.TestCase): - """ChatCompletionRequestSystemMessageContentPart unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_tool_message.py 
b/launch/api_client/test/test_models/test_chat_completion_request_tool_message.py deleted file mode 100644 index d95444ab..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_tool_message.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_tool_message import ( - ChatCompletionRequestToolMessage, -) - - -class TestChatCompletionRequestToolMessage(unittest.TestCase): - """ChatCompletionRequestToolMessage unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_tool_message_content_part.py b/launch/api_client/test/test_models/test_chat_completion_request_tool_message_content_part.py deleted file mode 100644 index 46d2afa1..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_tool_message_content_part.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_tool_message_content_part import ( - ChatCompletionRequestToolMessageContentPart, -) - - -class TestChatCompletionRequestToolMessageContentPart(unittest.TestCase): - """ChatCompletionRequestToolMessageContentPart unit test stubs""" - - _configuration = 
configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_user_message.py b/launch/api_client/test/test_models/test_chat_completion_request_user_message.py deleted file mode 100644 index 06bc068a..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_user_message.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_user_message import ( - ChatCompletionRequestUserMessage, -) - - -class TestChatCompletionRequestUserMessage(unittest.TestCase): - """ChatCompletionRequestUserMessage unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_user_message_content_part.py b/launch/api_client/test/test_models/test_chat_completion_request_user_message_content_part.py deleted file mode 100644 index 7bc9b522..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_request_user_message_content_part.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_request_user_message_content_part import ( - ChatCompletionRequestUserMessageContentPart, -) - - -class 
TestChatCompletionRequestUserMessageContentPart(unittest.TestCase): - """ChatCompletionRequestUserMessageContentPart unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_response_message.py b/launch/api_client/test/test_models/test_chat_completion_response_message.py deleted file mode 100644 index f9c75f59..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_response_message.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_response_message import ( - ChatCompletionResponseMessage, -) - - -class TestChatCompletionResponseMessage(unittest.TestCase): - """ChatCompletionResponseMessage unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_stream_options.py b/launch/api_client/test/test_models/test_chat_completion_stream_options.py deleted file mode 100644 index e52f49a2..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_stream_options.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_stream_options import ( - 
ChatCompletionStreamOptions, -) - - -class TestChatCompletionStreamOptions(unittest.TestCase): - """ChatCompletionStreamOptions unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_stream_response_delta.py b/launch/api_client/test/test_models/test_chat_completion_stream_response_delta.py deleted file mode 100644 index 8456fdee..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_stream_response_delta.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_stream_response_delta import ( - ChatCompletionStreamResponseDelta, -) - - -class TestChatCompletionStreamResponseDelta(unittest.TestCase): - """ChatCompletionStreamResponseDelta unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_token_logprob.py b/launch/api_client/test/test_models/test_chat_completion_token_logprob.py deleted file mode 100644 index 65e63e6a..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_token_logprob.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from 
launch.api_client.model.chat_completion_token_logprob import ( - ChatCompletionTokenLogprob, -) - - -class TestChatCompletionTokenLogprob(unittest.TestCase): - """ChatCompletionTokenLogprob unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_tool.py b/launch/api_client/test/test_models/test_chat_completion_tool.py deleted file mode 100644 index a7fe123e..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_tool.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_tool import ChatCompletionTool - - -class TestChatCompletionTool(unittest.TestCase): - """ChatCompletionTool unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_tool_choice_option.py b/launch/api_client/test/test_models/test_chat_completion_tool_choice_option.py deleted file mode 100644 index 9330c8e6..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_tool_choice_option.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_tool_choice_option import ( - 
ChatCompletionToolChoiceOption, -) - - -class TestChatCompletionToolChoiceOption(unittest.TestCase): - """ChatCompletionToolChoiceOption unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_v2_request.py b/launch/api_client/test/test_models/test_chat_completion_v2_request.py deleted file mode 100644 index 6117d307..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_v2_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_v2_request import ( - ChatCompletionV2Request, -) - - -class TestChatCompletionV2Request(unittest.TestCase): - """ChatCompletionV2Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_v2_stream_error_chunk.py b/launch/api_client/test/test_models/test_chat_completion_v2_stream_error_chunk.py deleted file mode 100644 index 5ed2c7e0..00000000 --- a/launch/api_client/test/test_models/test_chat_completion_v2_stream_error_chunk.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.chat_completion_v2_stream_error_chunk import ( - 
ChatCompletionV2StreamErrorChunk, -) - - -class TestChatCompletionV2StreamErrorChunk(unittest.TestCase): - """ChatCompletionV2StreamErrorChunk unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_choice.py b/launch/api_client/test/test_models/test_choice.py deleted file mode 100644 index 853f8b63..00000000 --- a/launch/api_client/test/test_models/test_choice.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.choice import Choice - - -class TestChoice(unittest.TestCase): - """Choice unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_choice1.py b/launch/api_client/test/test_models/test_choice1.py deleted file mode 100644 index 2f74ca27..00000000 --- a/launch/api_client/test/test_models/test_choice1.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.choice1 import Choice1 - - -class TestChoice1(unittest.TestCase): - """Choice1 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_choice2.py 
b/launch/api_client/test/test_models/test_choice2.py deleted file mode 100644 index fdc2936e..00000000 --- a/launch/api_client/test/test_models/test_choice2.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.choice2 import Choice2 - - -class TestChoice2(unittest.TestCase): - """Choice2 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_clone_model_bundle_v1_request.py b/launch/api_client/test/test_models/test_clone_model_bundle_v1_request.py deleted file mode 100644 index b69c30dc..00000000 --- a/launch/api_client/test/test_models/test_clone_model_bundle_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.clone_model_bundle_v1_request import ( - CloneModelBundleV1Request, -) - - -class TestCloneModelBundleV1Request(unittest.TestCase): - """CloneModelBundleV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_clone_model_bundle_v2_request.py b/launch/api_client/test/test_models/test_clone_model_bundle_v2_request.py deleted file mode 100644 index 7f31450d..00000000 --- 
a/launch/api_client/test/test_models/test_clone_model_bundle_v2_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.clone_model_bundle_v2_request import ( - CloneModelBundleV2Request, -) - - -class TestCloneModelBundleV2Request(unittest.TestCase): - """CloneModelBundleV2Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_cloudpickle_artifact_flavor.py b/launch/api_client/test/test_models/test_cloudpickle_artifact_flavor.py deleted file mode 100644 index e5696f3a..00000000 --- a/launch/api_client/test/test_models/test_cloudpickle_artifact_flavor.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.cloudpickle_artifact_flavor import ( - CloudpickleArtifactFlavor, -) - - -class TestCloudpickleArtifactFlavor(unittest.TestCase): - """CloudpickleArtifactFlavor unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_output.py b/launch/api_client/test/test_models/test_completion_output.py deleted file mode 100644 index b402a68f..00000000 --- a/launch/api_client/test/test_models/test_completion_output.py +++ 
/dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.completion_output import CompletionOutput - - -class TestCompletionOutput(unittest.TestCase): - """CompletionOutput unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_stream_output.py b/launch/api_client/test/test_models/test_completion_stream_output.py deleted file mode 100644 index fa8aa3ca..00000000 --- a/launch/api_client/test/test_models/test_completion_stream_output.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.completion_stream_output import ( - CompletionStreamOutput, -) - - -class TestCompletionStreamOutput(unittest.TestCase): - """CompletionStreamOutput unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_stream_v1_request.py b/launch/api_client/test/test_models/test_completion_stream_v1_request.py deleted file mode 100644 index d6d9ea03..00000000 --- a/launch/api_client/test/test_models/test_completion_stream_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi 
Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.completion_stream_v1_request import ( - CompletionStreamV1Request, -) - - -class TestCompletionStreamV1Request(unittest.TestCase): - """CompletionStreamV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_stream_v1_response.py b/launch/api_client/test/test_models/test_completion_stream_v1_response.py deleted file mode 100644 index 559c0ae0..00000000 --- a/launch/api_client/test/test_models/test_completion_stream_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.completion_stream_v1_response import ( - CompletionStreamV1Response, -) - - -class TestCompletionStreamV1Response(unittest.TestCase): - """CompletionStreamV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_sync_v1_request.py b/launch/api_client/test/test_models/test_completion_sync_v1_request.py deleted file mode 100644 index 116293d3..00000000 --- a/launch/api_client/test/test_models/test_completion_sync_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.completion_sync_v1_request import ( - CompletionSyncV1Request, -) - - -class TestCompletionSyncV1Request(unittest.TestCase): - """CompletionSyncV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_sync_v1_response.py b/launch/api_client/test/test_models/test_completion_sync_v1_response.py deleted file mode 100644 index 15d6fa17..00000000 --- a/launch/api_client/test/test_models/test_completion_sync_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.completion_sync_v1_response import ( - CompletionSyncV1Response, -) - - -class TestCompletionSyncV1Response(unittest.TestCase): - """CompletionSyncV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_tokens_details.py b/launch/api_client/test/test_models/test_completion_tokens_details.py deleted file mode 100644 index 1726e781..00000000 --- a/launch/api_client/test/test_models/test_completion_tokens_details.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - 
The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.completion_tokens_details import ( - CompletionTokensDetails, -) - - -class TestCompletionTokensDetails(unittest.TestCase): - """CompletionTokensDetails unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_usage.py b/launch/api_client/test/test_models/test_completion_usage.py deleted file mode 100644 index f328d79e..00000000 --- a/launch/api_client/test/test_models/test_completion_usage.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.completion_usage import CompletionUsage - - -class TestCompletionUsage(unittest.TestCase): - """CompletionUsage unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_v2_request.py b/launch/api_client/test/test_models/test_completion_v2_request.py deleted file mode 100644 index ea95737a..00000000 --- a/launch/api_client/test/test_models/test_completion_v2_request.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from 
launch.api_client import configuration -from launch.api_client.model.completion_v2_request import CompletionV2Request - - -class TestCompletionV2Request(unittest.TestCase): - """CompletionV2Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_v2_stream_error_chunk.py b/launch/api_client/test/test_models/test_completion_v2_stream_error_chunk.py deleted file mode 100644 index 588cc489..00000000 --- a/launch/api_client/test/test_models/test_completion_v2_stream_error_chunk.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.completion_v2_stream_error_chunk import ( - CompletionV2StreamErrorChunk, -) - - -class TestCompletionV2StreamErrorChunk(unittest.TestCase): - """CompletionV2StreamErrorChunk unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_content.py b/launch/api_client/test/test_models/test_content.py deleted file mode 100644 index 086be440..00000000 --- a/launch/api_client/test/test_models/test_content.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.content import Content - - -class 
TestContent(unittest.TestCase): - """Content unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_content1.py b/launch/api_client/test/test_models/test_content1.py deleted file mode 100644 index 34ea8307..00000000 --- a/launch/api_client/test/test_models/test_content1.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.content1 import Content1 - - -class TestContent1(unittest.TestCase): - """Content1 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_content2.py b/launch/api_client/test/test_models/test_content2.py deleted file mode 100644 index a17771b2..00000000 --- a/launch/api_client/test/test_models/test_content2.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.content2 import Content2 - - -class TestContent2(unittest.TestCase): - """Content2 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_content3.py b/launch/api_client/test/test_models/test_content3.py deleted file mode 100644 index 
9c4e8ab6..00000000 --- a/launch/api_client/test/test_models/test_content3.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.content3 import Content3 - - -class TestContent3(unittest.TestCase): - """Content3 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_content4.py b/launch/api_client/test/test_models/test_content4.py deleted file mode 100644 index 8d4a9d8c..00000000 --- a/launch/api_client/test/test_models/test_content4.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.content4 import Content4 - - -class TestContent4(unittest.TestCase): - """Content4 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_content8.py b/launch/api_client/test/test_models/test_content8.py deleted file mode 100644 index cc2d68a8..00000000 --- a/launch/api_client/test/test_models/test_content8.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - 
Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.content8 import Content8 - - -class TestContent8(unittest.TestCase): - """Content8 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_async_task_v1_response.py b/launch/api_client/test/test_models/test_create_async_task_v1_response.py deleted file mode 100644 index 5df09f60..00000000 --- a/launch/api_client/test/test_models/test_create_async_task_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_async_task_v1_response import ( - CreateAsyncTaskV1Response, -) - - -class TestCreateAsyncTaskV1Response(unittest.TestCase): - """CreateAsyncTaskV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v1_model_config.py b/launch/api_client/test/test_models/test_create_batch_completions_v1_model_config.py deleted file mode 100644 index 66de0e9b..00000000 --- a/launch/api_client/test/test_models/test_create_batch_completions_v1_model_config.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import 
launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_batch_completions_v1_model_config import ( - CreateBatchCompletionsV1ModelConfig, -) - - -class TestCreateBatchCompletionsV1ModelConfig(unittest.TestCase): - """CreateBatchCompletionsV1ModelConfig unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v1_request.py b/launch/api_client/test/test_models/test_create_batch_completions_v1_request.py deleted file mode 100644 index 4a0c35f8..00000000 --- a/launch/api_client/test/test_models/test_create_batch_completions_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_batch_completions_v1_request import ( - CreateBatchCompletionsV1Request, -) - - -class TestCreateBatchCompletionsV1Request(unittest.TestCase): - """CreateBatchCompletionsV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v1_request_content.py b/launch/api_client/test/test_models/test_create_batch_completions_v1_request_content.py deleted file mode 100644 index d83e4ab9..00000000 --- a/launch/api_client/test/test_models/test_create_batch_completions_v1_request_content.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI 
document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_batch_completions_v1_request_content import ( - CreateBatchCompletionsV1RequestContent, -) - - -class TestCreateBatchCompletionsV1RequestContent(unittest.TestCase): - """CreateBatchCompletionsV1RequestContent unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v1_response.py b/launch/api_client/test/test_models/test_create_batch_completions_v1_response.py deleted file mode 100644 index 3178d605..00000000 --- a/launch/api_client/test/test_models/test_create_batch_completions_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_batch_completions_v1_response import ( - CreateBatchCompletionsV1Response, -) - - -class TestCreateBatchCompletionsV1Response(unittest.TestCase): - """CreateBatchCompletionsV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v2_request.py b/launch/api_client/test/test_models/test_create_batch_completions_v2_request.py deleted file mode 100644 index 0b3f52c7..00000000 --- a/launch/api_client/test/test_models/test_create_batch_completions_v2_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_batch_completions_v2_request import ( - CreateBatchCompletionsV2Request, -) - - -class TestCreateBatchCompletionsV2Request(unittest.TestCase): - """CreateBatchCompletionsV2Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_job_resource_requests.py b/launch/api_client/test/test_models/test_create_batch_job_resource_requests.py deleted file mode 100644 index 5899dc99..00000000 --- a/launch/api_client/test/test_models/test_create_batch_job_resource_requests.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_batch_job_resource_requests import ( - CreateBatchJobResourceRequests, -) - - -class TestCreateBatchJobResourceRequests(unittest.TestCase): - """CreateBatchJobResourceRequests unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_job_v1_request.py b/launch/api_client/test/test_models/test_create_batch_job_v1_request.py deleted file mode 100644 index 66fa7437..00000000 --- a/launch/api_client/test/test_models/test_create_batch_job_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by 
Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_batch_job_v1_request import ( - CreateBatchJobV1Request, -) - - -class TestCreateBatchJobV1Request(unittest.TestCase): - """CreateBatchJobV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_job_v1_response.py b/launch/api_client/test/test_models/test_create_batch_job_v1_response.py deleted file mode 100644 index a468158b..00000000 --- a/launch/api_client/test/test_models/test_create_batch_job_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_batch_job_v1_response import ( - CreateBatchJobV1Response, -) - - -class TestCreateBatchJobV1Response(unittest.TestCase): - """CreateBatchJobV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_chat_completion_response.py b/launch/api_client/test/test_models/test_create_chat_completion_response.py deleted file mode 100644 index ba7985e2..00000000 --- a/launch/api_client/test/test_models/test_create_chat_completion_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_chat_completion_response import ( - CreateChatCompletionResponse, -) - - -class TestCreateChatCompletionResponse(unittest.TestCase): - """CreateChatCompletionResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_chat_completion_stream_response.py b/launch/api_client/test/test_models/test_create_chat_completion_stream_response.py deleted file mode 100644 index e5d414f5..00000000 --- a/launch/api_client/test/test_models/test_create_chat_completion_stream_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_chat_completion_stream_response import ( - CreateChatCompletionStreamResponse, -) - - -class TestCreateChatCompletionStreamResponse(unittest.TestCase): - """CreateChatCompletionStreamResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_completion_response.py b/launch/api_client/test/test_models/test_create_completion_response.py deleted file mode 100644 index 30ff501d..00000000 --- a/launch/api_client/test/test_models/test_create_completion_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided 
(generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_completion_response import ( - CreateCompletionResponse, -) - - -class TestCreateCompletionResponse(unittest.TestCase): - """CreateCompletionResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_deep_speed_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_deep_speed_model_endpoint_request.py deleted file mode 100644 index 358d0d15..00000000 --- a/launch/api_client/test/test_models/test_create_deep_speed_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_deep_speed_model_endpoint_request import ( - CreateDeepSpeedModelEndpointRequest, -) - - -class TestCreateDeepSpeedModelEndpointRequest(unittest.TestCase): - """CreateDeepSpeedModelEndpointRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_request.py b/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_request.py deleted file mode 100644 index 1cd370df..00000000 --- a/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_request.py +++ 
/dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_docker_image_batch_job_bundle_v1_request import ( - CreateDockerImageBatchJobBundleV1Request, -) - - -class TestCreateDockerImageBatchJobBundleV1Request(unittest.TestCase): - """CreateDockerImageBatchJobBundleV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_response.py deleted file mode 100644 index 64c05b93..00000000 --- a/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_docker_image_batch_job_bundle_v1_response import ( - CreateDockerImageBatchJobBundleV1Response, -) - - -class TestCreateDockerImageBatchJobBundleV1Response(unittest.TestCase): - """CreateDockerImageBatchJobBundleV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_resource_requests.py 
b/launch/api_client/test/test_models/test_create_docker_image_batch_job_resource_requests.py deleted file mode 100644 index c8e7d475..00000000 --- a/launch/api_client/test/test_models/test_create_docker_image_batch_job_resource_requests.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_docker_image_batch_job_resource_requests import ( - CreateDockerImageBatchJobResourceRequests, -) - - -class TestCreateDockerImageBatchJobResourceRequests(unittest.TestCase): - """CreateDockerImageBatchJobResourceRequests unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_request.py b/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_request.py deleted file mode 100644 index 43a17ed4..00000000 --- a/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_docker_image_batch_job_v1_request import ( - CreateDockerImageBatchJobV1Request, -) - - -class TestCreateDockerImageBatchJobV1Request(unittest.TestCase): - """CreateDockerImageBatchJobV1Request unit test stubs""" - - _configuration = 
configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_response.py b/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_response.py deleted file mode 100644 index d00f1a5c..00000000 --- a/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_docker_image_batch_job_v1_response import ( - CreateDockerImageBatchJobV1Response, -) - - -class TestCreateDockerImageBatchJobV1Response(unittest.TestCase): - """CreateDockerImageBatchJobV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_fine_tune_request.py b/launch/api_client/test/test_models/test_create_fine_tune_request.py deleted file mode 100644 index cb5c9130..00000000 --- a/launch/api_client/test/test_models/test_create_fine_tune_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_fine_tune_request import ( - CreateFineTuneRequest, -) - - -class TestCreateFineTuneRequest(unittest.TestCase): - """CreateFineTuneRequest unit test stubs""" - - 
_configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_fine_tune_response.py b/launch/api_client/test/test_models/test_create_fine_tune_response.py deleted file mode 100644 index 3c1976ca..00000000 --- a/launch/api_client/test/test_models/test_create_fine_tune_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_fine_tune_response import ( - CreateFineTuneResponse, -) - - -class TestCreateFineTuneResponse(unittest.TestCase): - """CreateFineTuneResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_light_llm_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_light_llm_model_endpoint_request.py deleted file mode 100644 index de9f89ea..00000000 --- a/launch/api_client/test/test_models/test_create_light_llm_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_light_llm_model_endpoint_request import ( - CreateLightLLMModelEndpointRequest, -) - - -class TestCreateLightLLMModelEndpointRequest(unittest.TestCase): - """CreateLightLLMModelEndpointRequest unit test 
stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_request.py b/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_request.py deleted file mode 100644 index 5193285d..00000000 --- a/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_llm_model_endpoint_v1_request import ( - CreateLLMModelEndpointV1Request, -) - - -class TestCreateLLMModelEndpointV1Request(unittest.TestCase): - """CreateLLMModelEndpointV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_response.py deleted file mode 100644 index bb13b794..00000000 --- a/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_llm_model_endpoint_v1_response import ( - CreateLLMModelEndpointV1Response, -) - - -class 
TestCreateLLMModelEndpointV1Response(unittest.TestCase): - """CreateLLMModelEndpointV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_bundle_v1_request.py b/launch/api_client/test/test_models/test_create_model_bundle_v1_request.py deleted file mode 100644 index ac1b7509..00000000 --- a/launch/api_client/test/test_models/test_create_model_bundle_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_model_bundle_v1_request import ( - CreateModelBundleV1Request, -) - - -class TestCreateModelBundleV1Request(unittest.TestCase): - """CreateModelBundleV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_bundle_v1_response.py b/launch/api_client/test/test_models/test_create_model_bundle_v1_response.py deleted file mode 100644 index 3cba4c7c..00000000 --- a/launch/api_client/test/test_models/test_create_model_bundle_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_model_bundle_v1_response import ( - CreateModelBundleV1Response, -) - - -class 
TestCreateModelBundleV1Response(unittest.TestCase): - """CreateModelBundleV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_bundle_v2_request.py b/launch/api_client/test/test_models/test_create_model_bundle_v2_request.py deleted file mode 100644 index 8ba28a24..00000000 --- a/launch/api_client/test/test_models/test_create_model_bundle_v2_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_model_bundle_v2_request import ( - CreateModelBundleV2Request, -) - - -class TestCreateModelBundleV2Request(unittest.TestCase): - """CreateModelBundleV2Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_bundle_v2_response.py b/launch/api_client/test/test_models/test_create_model_bundle_v2_response.py deleted file mode 100644 index b14593f7..00000000 --- a/launch/api_client/test/test_models/test_create_model_bundle_v2_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_model_bundle_v2_response import ( - CreateModelBundleV2Response, -) - - -class 
TestCreateModelBundleV2Response(unittest.TestCase): - """CreateModelBundleV2Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_endpoint_v1_request.py b/launch/api_client/test/test_models/test_create_model_endpoint_v1_request.py deleted file mode 100644 index 3504cc52..00000000 --- a/launch/api_client/test/test_models/test_create_model_endpoint_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_model_endpoint_v1_request import ( - CreateModelEndpointV1Request, -) - - -class TestCreateModelEndpointV1Request(unittest.TestCase): - """CreateModelEndpointV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_create_model_endpoint_v1_response.py deleted file mode 100644 index b15ebe7c..00000000 --- a/launch/api_client/test/test_models/test_create_model_endpoint_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_model_endpoint_v1_response import ( - CreateModelEndpointV1Response, -) - 
- -class TestCreateModelEndpointV1Response(unittest.TestCase): - """CreateModelEndpointV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_sg_lang_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_sg_lang_model_endpoint_request.py deleted file mode 100644 index 3dbc62c6..00000000 --- a/launch/api_client/test/test_models/test_create_sg_lang_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_sg_lang_model_endpoint_request import ( - CreateSGLangModelEndpointRequest, -) - - -class TestCreateSGLangModelEndpointRequest(unittest.TestCase): - """CreateSGLangModelEndpointRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_tensor_rtllm_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_tensor_rtllm_model_endpoint_request.py deleted file mode 100644 index f323417e..00000000 --- a/launch/api_client/test/test_models/test_create_tensor_rtllm_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from 
launch.api_client.model.create_tensor_rtllm_model_endpoint_request import ( - CreateTensorRTLLMModelEndpointRequest, -) - - -class TestCreateTensorRTLLMModelEndpointRequest(unittest.TestCase): - """CreateTensorRTLLMModelEndpointRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_text_generation_inference_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_text_generation_inference_model_endpoint_request.py deleted file mode 100644 index 2cce6e45..00000000 --- a/launch/api_client/test/test_models/test_create_text_generation_inference_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_text_generation_inference_model_endpoint_request import ( - CreateTextGenerationInferenceModelEndpointRequest, -) - - -class TestCreateTextGenerationInferenceModelEndpointRequest(unittest.TestCase): - """CreateTextGenerationInferenceModelEndpointRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_trigger_v1_request.py b/launch/api_client/test/test_models/test_create_trigger_v1_request.py deleted file mode 100644 index b4022ac5..00000000 --- a/launch/api_client/test/test_models/test_create_trigger_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of 
the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_trigger_v1_request import ( - CreateTriggerV1Request, -) - - -class TestCreateTriggerV1Request(unittest.TestCase): - """CreateTriggerV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_trigger_v1_response.py b/launch/api_client/test/test_models/test_create_trigger_v1_response.py deleted file mode 100644 index ceef3d29..00000000 --- a/launch/api_client/test/test_models/test_create_trigger_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_trigger_v1_response import ( - CreateTriggerV1Response, -) - - -class TestCreateTriggerV1Response(unittest.TestCase): - """CreateTriggerV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_create_vllm_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_vllm_model_endpoint_request.py deleted file mode 100644 index d1df7abe..00000000 --- a/launch/api_client/test/test_models/test_create_vllm_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: 
https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.create_vllm_model_endpoint_request import ( - CreateVLLMModelEndpointRequest, -) - - -class TestCreateVLLMModelEndpointRequest(unittest.TestCase): - """CreateVLLMModelEndpointRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_custom_framework.py b/launch/api_client/test/test_models/test_custom_framework.py deleted file mode 100644 index 0bfbff09..00000000 --- a/launch/api_client/test/test_models/test_custom_framework.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.custom_framework import CustomFramework - - -class TestCustomFramework(unittest.TestCase): - """CustomFramework unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_delete_file_response.py b/launch/api_client/test/test_models/test_delete_file_response.py deleted file mode 100644 index 224e9226..00000000 --- a/launch/api_client/test/test_models/test_delete_file_response.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration 
-from launch.api_client.model.delete_file_response import DeleteFileResponse - - -class TestDeleteFileResponse(unittest.TestCase): - """DeleteFileResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_delete_llm_endpoint_response.py b/launch/api_client/test/test_models/test_delete_llm_endpoint_response.py deleted file mode 100644 index 28cfe878..00000000 --- a/launch/api_client/test/test_models/test_delete_llm_endpoint_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.delete_llm_endpoint_response import ( - DeleteLLMEndpointResponse, -) - - -class TestDeleteLLMEndpointResponse(unittest.TestCase): - """DeleteLLMEndpointResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_delete_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_delete_model_endpoint_v1_response.py deleted file mode 100644 index 88e0c91c..00000000 --- a/launch/api_client/test/test_models/test_delete_model_endpoint_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.delete_model_endpoint_v1_response 
import ( - DeleteModelEndpointV1Response, -) - - -class TestDeleteModelEndpointV1Response(unittest.TestCase): - """DeleteModelEndpointV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_delete_trigger_v1_response.py b/launch/api_client/test/test_models/test_delete_trigger_v1_response.py deleted file mode 100644 index 85e48e44..00000000 --- a/launch/api_client/test/test_models/test_delete_trigger_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.delete_trigger_v1_response import ( - DeleteTriggerV1Response, -) - - -class TestDeleteTriggerV1Response(unittest.TestCase): - """DeleteTriggerV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_docker_image_batch_job.py b/launch/api_client/test/test_models/test_docker_image_batch_job.py deleted file mode 100644 index 4b92fcec..00000000 --- a/launch/api_client/test/test_models/test_docker_image_batch_job.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.docker_image_batch_job import DockerImageBatchJob - - -class 
TestDockerImageBatchJob(unittest.TestCase): - """DockerImageBatchJob unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/test/test_models/test_docker_image_batch_job_bundle_v1_response.py deleted file mode 100644 index c5681e07..00000000 --- a/launch/api_client/test/test_models/test_docker_image_batch_job_bundle_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) - - -class TestDockerImageBatchJobBundleV1Response(unittest.TestCase): - """DockerImageBatchJobBundleV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_endpoint_predict_v1_request.py b/launch/api_client/test/test_models/test_endpoint_predict_v1_request.py deleted file mode 100644 index a2380e7f..00000000 --- a/launch/api_client/test/test_models/test_endpoint_predict_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.endpoint_predict_v1_request import ( - 
EndpointPredictV1Request, -) - - -class TestEndpointPredictV1Request(unittest.TestCase): - """EndpointPredictV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_file.py b/launch/api_client/test/test_models/test_file.py deleted file mode 100644 index 75f7173b..00000000 --- a/launch/api_client/test/test_models/test_file.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.file import File - - -class TestFile(unittest.TestCase): - """File unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_filtered_chat_completion_v2_request.py b/launch/api_client/test/test_models/test_filtered_chat_completion_v2_request.py deleted file mode 100644 index 887889e8..00000000 --- a/launch/api_client/test/test_models/test_filtered_chat_completion_v2_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.filtered_chat_completion_v2_request import ( - FilteredChatCompletionV2Request, -) - - -class TestFilteredChatCompletionV2Request(unittest.TestCase): - """FilteredChatCompletionV2Request unit test stubs""" - - _configuration = 
configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_filtered_completion_v2_request.py b/launch/api_client/test/test_models/test_filtered_completion_v2_request.py deleted file mode 100644 index 993945b7..00000000 --- a/launch/api_client/test/test_models/test_filtered_completion_v2_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.filtered_completion_v2_request import ( - FilteredCompletionV2Request, -) - - -class TestFilteredCompletionV2Request(unittest.TestCase): - """FilteredCompletionV2Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_function1.py b/launch/api_client/test/test_models/test_function1.py deleted file mode 100644 index 3be24663..00000000 --- a/launch/api_client/test/test_models/test_function1.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.function1 import Function1 - - -class TestFunction1(unittest.TestCase): - """Function1 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_function2.py 
b/launch/api_client/test/test_models/test_function2.py deleted file mode 100644 index 9ef3b4b3..00000000 --- a/launch/api_client/test/test_models/test_function2.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.function2 import Function2 - - -class TestFunction2(unittest.TestCase): - """Function2 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_function3.py b/launch/api_client/test/test_models/test_function3.py deleted file mode 100644 index 14bf6d5e..00000000 --- a/launch/api_client/test/test_models/test_function3.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.function3 import Function3 - - -class TestFunction3(unittest.TestCase): - """Function3 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_function_call.py b/launch/api_client/test/test_models/test_function_call.py deleted file mode 100644 index 2e3ed603..00000000 --- a/launch/api_client/test/test_models/test_function_call.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.function_call import FunctionCall - - -class TestFunctionCall(unittest.TestCase): - """FunctionCall unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_function_call2.py b/launch/api_client/test/test_models/test_function_call2.py deleted file mode 100644 index 6477b506..00000000 --- a/launch/api_client/test/test_models/test_function_call2.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.function_call2 import FunctionCall2 - - -class TestFunctionCall2(unittest.TestCase): - """FunctionCall2 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_function_object.py b/launch/api_client/test/test_models/test_function_object.py deleted file mode 100644 index 6debac41..00000000 --- a/launch/api_client/test/test_models/test_function_object.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import 
configuration -from launch.api_client.model.function_object import FunctionObject - - -class TestFunctionObject(unittest.TestCase): - """FunctionObject unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_function_parameters.py b/launch/api_client/test/test_models/test_function_parameters.py deleted file mode 100644 index a58e1c57..00000000 --- a/launch/api_client/test/test_models/test_function_parameters.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.function_parameters import FunctionParameters - - -class TestFunctionParameters(unittest.TestCase): - """FunctionParameters unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_get_async_task_v1_response.py b/launch/api_client/test/test_models/test_get_async_task_v1_response.py deleted file mode 100644 index 969c9f1e..00000000 --- a/launch/api_client/test/test_models/test_get_async_task_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_async_task_v1_response import ( - GetAsyncTaskV1Response, -) - - -class TestGetAsyncTaskV1Response(unittest.TestCase): - 
"""GetAsyncTaskV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_get_batch_completion_v2_response.py b/launch/api_client/test/test_models/test_get_batch_completion_v2_response.py deleted file mode 100644 index e80b0c5c..00000000 --- a/launch/api_client/test/test_models/test_get_batch_completion_v2_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_batch_completion_v2_response import ( - GetBatchCompletionV2Response, -) - - -class TestGetBatchCompletionV2Response(unittest.TestCase): - """GetBatchCompletionV2Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_get_batch_job_v1_response.py b/launch/api_client/test/test_models/test_get_batch_job_v1_response.py deleted file mode 100644 index 8d9d2d89..00000000 --- a/launch/api_client/test/test_models/test_get_batch_job_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_batch_job_v1_response import ( - GetBatchJobV1Response, -) - - -class TestGetBatchJobV1Response(unittest.TestCase): - """GetBatchJobV1Response unit test 
stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_get_docker_image_batch_job_v1_response.py b/launch/api_client/test/test_models/test_get_docker_image_batch_job_v1_response.py deleted file mode 100644 index c55ec80a..00000000 --- a/launch/api_client/test/test_models/test_get_docker_image_batch_job_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_docker_image_batch_job_v1_response import ( - GetDockerImageBatchJobV1Response, -) - - -class TestGetDockerImageBatchJobV1Response(unittest.TestCase): - """GetDockerImageBatchJobV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_get_file_content_response.py b/launch/api_client/test/test_models/test_get_file_content_response.py deleted file mode 100644 index bc938a47..00000000 --- a/launch/api_client/test/test_models/test_get_file_content_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_file_content_response import ( - GetFileContentResponse, -) - - -class TestGetFileContentResponse(unittest.TestCase): - """GetFileContentResponse unit test 
stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_get_file_response.py b/launch/api_client/test/test_models/test_get_file_response.py deleted file mode 100644 index cd55395b..00000000 --- a/launch/api_client/test/test_models/test_get_file_response.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_file_response import GetFileResponse - - -class TestGetFileResponse(unittest.TestCase): - """GetFileResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_get_fine_tune_events_response.py b/launch/api_client/test/test_models/test_get_fine_tune_events_response.py deleted file mode 100644 index 67e40775..00000000 --- a/launch/api_client/test/test_models/test_get_fine_tune_events_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_fine_tune_events_response import ( - GetFineTuneEventsResponse, -) - - -class TestGetFineTuneEventsResponse(unittest.TestCase): - """GetFineTuneEventsResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git 
a/launch/api_client/test/test_models/test_get_fine_tune_response.py b/launch/api_client/test/test_models/test_get_fine_tune_response.py deleted file mode 100644 index 42eedc90..00000000 --- a/launch/api_client/test/test_models/test_get_fine_tune_response.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_fine_tune_response import GetFineTuneResponse - - -class TestGetFineTuneResponse(unittest.TestCase): - """GetFineTuneResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_get_llm_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_get_llm_model_endpoint_v1_response.py deleted file mode 100644 index 7214f77a..00000000 --- a/launch/api_client/test/test_models/test_get_llm_model_endpoint_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_llm_model_endpoint_v1_response import ( - GetLLMModelEndpointV1Response, -) - - -class TestGetLLMModelEndpointV1Response(unittest.TestCase): - """GetLLMModelEndpointV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git 
a/launch/api_client/test/test_models/test_get_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_get_model_endpoint_v1_response.py deleted file mode 100644 index e70ce97d..00000000 --- a/launch/api_client/test/test_models/test_get_model_endpoint_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) - - -class TestGetModelEndpointV1Response(unittest.TestCase): - """GetModelEndpointV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_get_trigger_v1_response.py b/launch/api_client/test/test_models/test_get_trigger_v1_response.py deleted file mode 100644 index df0ec899..00000000 --- a/launch/api_client/test/test_models/test_get_trigger_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.get_trigger_v1_response import ( - GetTriggerV1Response, -) - - -class TestGetTriggerV1Response(unittest.TestCase): - """GetTriggerV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_gpu_type.py 
b/launch/api_client/test/test_models/test_gpu_type.py deleted file mode 100644 index dd3395b6..00000000 --- a/launch/api_client/test/test_models/test_gpu_type.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.gpu_type import GpuType - - -class TestGpuType(unittest.TestCase): - """GpuType unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_http_validation_error.py b/launch/api_client/test/test_models/test_http_validation_error.py deleted file mode 100644 index 3a40588f..00000000 --- a/launch/api_client/test/test_models/test_http_validation_error.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.http_validation_error import HTTPValidationError - - -class TestHTTPValidationError(unittest.TestCase): - """HTTPValidationError unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_image_url.py b/launch/api_client/test/test_models/test_image_url.py deleted file mode 100644 index 2f64d9ac..00000000 --- a/launch/api_client/test/test_models/test_image_url.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No 
description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.image_url import ImageUrl - - -class TestImageUrl(unittest.TestCase): - """ImageUrl unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_input_audio.py b/launch/api_client/test/test_models/test_input_audio.py deleted file mode 100644 index 10aae18b..00000000 --- a/launch/api_client/test/test_models/test_input_audio.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.input_audio import InputAudio - - -class TestInputAudio(unittest.TestCase): - """InputAudio unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_json_schema.py b/launch/api_client/test/test_models/test_json_schema.py deleted file mode 100644 index 17e55216..00000000 --- a/launch/api_client/test/test_models/test_json_schema.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import 
configuration -from launch.api_client.model.json_schema import JsonSchema - - -class TestJsonSchema(unittest.TestCase): - """JsonSchema unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_list_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/test/test_models/test_list_docker_image_batch_job_bundle_v1_response.py deleted file mode 100644 index 545b2e92..00000000 --- a/launch/api_client/test/test_models/test_list_docker_image_batch_job_bundle_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.list_docker_image_batch_job_bundle_v1_response import ( - ListDockerImageBatchJobBundleV1Response, -) - - -class TestListDockerImageBatchJobBundleV1Response(unittest.TestCase): - """ListDockerImageBatchJobBundleV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_list_docker_image_batch_jobs_v1_response.py b/launch/api_client/test/test_models/test_list_docker_image_batch_jobs_v1_response.py deleted file mode 100644 index d34e65e9..00000000 --- a/launch/api_client/test/test_models/test_list_docker_image_batch_jobs_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import 
launch.api_client -from launch.api_client import configuration -from launch.api_client.model.list_docker_image_batch_jobs_v1_response import ( - ListDockerImageBatchJobsV1Response, -) - - -class TestListDockerImageBatchJobsV1Response(unittest.TestCase): - """ListDockerImageBatchJobsV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_list_files_response.py b/launch/api_client/test/test_models/test_list_files_response.py deleted file mode 100644 index 596f5970..00000000 --- a/launch/api_client/test/test_models/test_list_files_response.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.list_files_response import ListFilesResponse - - -class TestListFilesResponse(unittest.TestCase): - """ListFilesResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_list_fine_tunes_response.py b/launch/api_client/test/test_models/test_list_fine_tunes_response.py deleted file mode 100644 index 8c46f410..00000000 --- a/launch/api_client/test/test_models/test_list_fine_tunes_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from 
launch.api_client.model.list_fine_tunes_response import ( - ListFineTunesResponse, -) - - -class TestListFineTunesResponse(unittest.TestCase): - """ListFineTunesResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_list_llm_model_endpoints_v1_response.py b/launch/api_client/test/test_models/test_list_llm_model_endpoints_v1_response.py deleted file mode 100644 index 7abfa977..00000000 --- a/launch/api_client/test/test_models/test_list_llm_model_endpoints_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.list_llm_model_endpoints_v1_response import ( - ListLLMModelEndpointsV1Response, -) - - -class TestListLLMModelEndpointsV1Response(unittest.TestCase): - """ListLLMModelEndpointsV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_list_model_bundles_v1_response.py b/launch/api_client/test/test_models/test_list_model_bundles_v1_response.py deleted file mode 100644 index 7dd7d091..00000000 --- a/launch/api_client/test/test_models/test_list_model_bundles_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from 
launch.api_client.model.list_model_bundles_v1_response import ( - ListModelBundlesV1Response, -) - - -class TestListModelBundlesV1Response(unittest.TestCase): - """ListModelBundlesV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_list_model_bundles_v2_response.py b/launch/api_client/test/test_models/test_list_model_bundles_v2_response.py deleted file mode 100644 index 1c93723f..00000000 --- a/launch/api_client/test/test_models/test_list_model_bundles_v2_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.list_model_bundles_v2_response import ( - ListModelBundlesV2Response, -) - - -class TestListModelBundlesV2Response(unittest.TestCase): - """ListModelBundlesV2Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_list_model_endpoints_v1_response.py b/launch/api_client/test/test_models/test_list_model_endpoints_v1_response.py deleted file mode 100644 index fd8538a0..00000000 --- a/launch/api_client/test/test_models/test_list_model_endpoints_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from 
launch.api_client.model.list_model_endpoints_v1_response import ( - ListModelEndpointsV1Response, -) - - -class TestListModelEndpointsV1Response(unittest.TestCase): - """ListModelEndpointsV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_list_triggers_v1_response.py b/launch/api_client/test/test_models/test_list_triggers_v1_response.py deleted file mode 100644 index 9e41cda6..00000000 --- a/launch/api_client/test/test_models/test_list_triggers_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.list_triggers_v1_response import ( - ListTriggersV1Response, -) - - -class TestListTriggersV1Response(unittest.TestCase): - """ListTriggersV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_llm_fine_tune_event.py b/launch/api_client/test/test_models/test_llm_fine_tune_event.py deleted file mode 100644 index 15faec4c..00000000 --- a/launch/api_client/test/test_models/test_llm_fine_tune_event.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.llm_fine_tune_event import LLMFineTuneEvent - - -class 
TestLLMFineTuneEvent(unittest.TestCase): - """LLMFineTuneEvent unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_llm_inference_framework.py b/launch/api_client/test/test_models/test_llm_inference_framework.py deleted file mode 100644 index b5866821..00000000 --- a/launch/api_client/test/test_models/test_llm_inference_framework.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.llm_inference_framework import ( - LLMInferenceFramework, -) - - -class TestLLMInferenceFramework(unittest.TestCase): - """LLMInferenceFramework unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_llm_source.py b/launch/api_client/test/test_models/test_llm_source.py deleted file mode 100644 index c581a450..00000000 --- a/launch/api_client/test/test_models/test_llm_source.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.llm_source import LLMSource - - -class TestLLMSource(unittest.TestCase): - """LLMSource unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git 
a/launch/api_client/test/test_models/test_logprobs.py b/launch/api_client/test/test_models/test_logprobs.py deleted file mode 100644 index cec6bb02..00000000 --- a/launch/api_client/test/test_models/test_logprobs.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.logprobs import Logprobs - - -class TestLogprobs(unittest.TestCase): - """Logprobs unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_logprobs2.py b/launch/api_client/test/test_models/test_logprobs2.py deleted file mode 100644 index 3dd1c171..00000000 --- a/launch/api_client/test/test_models/test_logprobs2.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.logprobs2 import Logprobs2 - - -class TestLogprobs2(unittest.TestCase): - """Logprobs2 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_metadata.py b/launch/api_client/test/test_models/test_metadata.py deleted file mode 100644 index 3b8633e6..00000000 --- a/launch/api_client/test/test_models/test_metadata.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided 
(generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.metadata import Metadata - - -class TestMetadata(unittest.TestCase): - """Metadata unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_environment_params.py b/launch/api_client/test/test_models/test_model_bundle_environment_params.py deleted file mode 100644 index 1b1296d4..00000000 --- a/launch/api_client/test/test_models/test_model_bundle_environment_params.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_bundle_environment_params import ( - ModelBundleEnvironmentParams, -) - - -class TestModelBundleEnvironmentParams(unittest.TestCase): - """ModelBundleEnvironmentParams unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_framework_type.py b/launch/api_client/test/test_models/test_model_bundle_framework_type.py deleted file mode 100644 index 88199a73..00000000 --- a/launch/api_client/test/test_models/test_model_bundle_framework_type.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The 
version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_bundle_framework_type import ( - ModelBundleFrameworkType, -) - - -class TestModelBundleFrameworkType(unittest.TestCase): - """ModelBundleFrameworkType unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_order_by.py b/launch/api_client/test/test_models/test_model_bundle_order_by.py deleted file mode 100644 index b1ab76fb..00000000 --- a/launch/api_client/test/test_models/test_model_bundle_order_by.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_bundle_order_by import ModelBundleOrderBy - - -class TestModelBundleOrderBy(unittest.TestCase): - """ModelBundleOrderBy unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_packaging_type.py b/launch/api_client/test/test_models/test_model_bundle_packaging_type.py deleted file mode 100644 index bf85bde7..00000000 --- a/launch/api_client/test/test_models/test_model_bundle_packaging_type.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import 
unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_bundle_packaging_type import ( - ModelBundlePackagingType, -) - - -class TestModelBundlePackagingType(unittest.TestCase): - """ModelBundlePackagingType unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_v1_response.py b/launch/api_client/test/test_models/test_model_bundle_v1_response.py deleted file mode 100644 index ad0c1f16..00000000 --- a/launch/api_client/test/test_models/test_model_bundle_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_bundle_v1_response import ( - ModelBundleV1Response, -) - - -class TestModelBundleV1Response(unittest.TestCase): - """ModelBundleV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_v2_response.py b/launch/api_client/test/test_models/test_model_bundle_v2_response.py deleted file mode 100644 index 74e704b3..00000000 --- a/launch/api_client/test/test_models/test_model_bundle_v2_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from 
launch.api_client.model.model_bundle_v2_response import ( - ModelBundleV2Response, -) - - -class TestModelBundleV2Response(unittest.TestCase): - """ModelBundleV2Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_download_request.py b/launch/api_client/test/test_models/test_model_download_request.py deleted file mode 100644 index 2fe09f35..00000000 --- a/launch/api_client/test/test_models/test_model_download_request.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_download_request import ModelDownloadRequest - - -class TestModelDownloadRequest(unittest.TestCase): - """ModelDownloadRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_download_response.py b/launch/api_client/test/test_models/test_model_download_response.py deleted file mode 100644 index d6a0285d..00000000 --- a/launch/api_client/test/test_models/test_model_download_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_download_response import ( - ModelDownloadResponse, -) - - -class 
TestModelDownloadResponse(unittest.TestCase): - """ModelDownloadResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_deployment_state.py b/launch/api_client/test/test_models/test_model_endpoint_deployment_state.py deleted file mode 100644 index 6824b916..00000000 --- a/launch/api_client/test/test_models/test_model_endpoint_deployment_state.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_endpoint_deployment_state import ( - ModelEndpointDeploymentState, -) - - -class TestModelEndpointDeploymentState(unittest.TestCase): - """ModelEndpointDeploymentState unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_order_by.py b/launch/api_client/test/test_models/test_model_endpoint_order_by.py deleted file mode 100644 index c2a28455..00000000 --- a/launch/api_client/test/test_models/test_model_endpoint_order_by.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_endpoint_order_by import ( - ModelEndpointOrderBy, -) - - -class TestModelEndpointOrderBy(unittest.TestCase): - 
"""ModelEndpointOrderBy unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_resource_state.py b/launch/api_client/test/test_models/test_model_endpoint_resource_state.py deleted file mode 100644 index 6d3ee8ad..00000000 --- a/launch/api_client/test/test_models/test_model_endpoint_resource_state.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_endpoint_resource_state import ( - ModelEndpointResourceState, -) - - -class TestModelEndpointResourceState(unittest.TestCase): - """ModelEndpointResourceState unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_status.py b/launch/api_client/test/test_models/test_model_endpoint_status.py deleted file mode 100644 index 1e05870d..00000000 --- a/launch/api_client/test/test_models/test_model_endpoint_status.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_endpoint_status import ModelEndpointStatus - - -class TestModelEndpointStatus(unittest.TestCase): - """ModelEndpointStatus unit test stubs""" - - _configuration = 
configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_type.py b/launch/api_client/test/test_models/test_model_endpoint_type.py deleted file mode 100644 index b7d64522..00000000 --- a/launch/api_client/test/test_models/test_model_endpoint_type.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.model_endpoint_type import ModelEndpointType - - -class TestModelEndpointType(unittest.TestCase): - """ModelEndpointType unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_parallel_tool_calls.py b/launch/api_client/test/test_models/test_parallel_tool_calls.py deleted file mode 100644 index 2a41d29f..00000000 --- a/launch/api_client/test/test_models/test_parallel_tool_calls.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.parallel_tool_calls import ParallelToolCalls - - -class TestParallelToolCalls(unittest.TestCase): - """ParallelToolCalls unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_prediction_content.py 
b/launch/api_client/test/test_models/test_prediction_content.py deleted file mode 100644 index 34a928b7..00000000 --- a/launch/api_client/test/test_models/test_prediction_content.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.prediction_content import PredictionContent - - -class TestPredictionContent(unittest.TestCase): - """PredictionContent unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_prompt.py b/launch/api_client/test/test_models/test_prompt.py deleted file mode 100644 index f75bacfc..00000000 --- a/launch/api_client/test/test_models/test_prompt.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.prompt import Prompt - - -class TestPrompt(unittest.TestCase): - """Prompt unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_prompt1.py b/launch/api_client/test/test_models/test_prompt1.py deleted file mode 100644 index 6d78b41d..00000000 --- a/launch/api_client/test/test_models/test_prompt1.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi 
Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.prompt1 import Prompt1 - - -class TestPrompt1(unittest.TestCase): - """Prompt1 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_prompt1_item.py b/launch/api_client/test/test_models/test_prompt1_item.py deleted file mode 100644 index 23e9c769..00000000 --- a/launch/api_client/test/test_models/test_prompt1_item.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.prompt1_item import Prompt1Item - - -class TestPrompt1Item(unittest.TestCase): - """Prompt1Item unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_prompt_tokens_details.py b/launch/api_client/test/test_models/test_prompt_tokens_details.py deleted file mode 100644 index 6f98298b..00000000 --- a/launch/api_client/test/test_models/test_prompt_tokens_details.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import 
configuration -from launch.api_client.model.prompt_tokens_details import PromptTokensDetails - - -class TestPromptTokensDetails(unittest.TestCase): - """PromptTokensDetails unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_pytorch_framework.py b/launch/api_client/test/test_models/test_pytorch_framework.py deleted file mode 100644 index 04c07858..00000000 --- a/launch/api_client/test/test_models/test_pytorch_framework.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.pytorch_framework import PytorchFramework - - -class TestPytorchFramework(unittest.TestCase): - """PytorchFramework unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_quantization.py b/launch/api_client/test/test_models/test_quantization.py deleted file mode 100644 index 29b080e2..00000000 --- a/launch/api_client/test/test_models/test_quantization.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.quantization import Quantization - - -class TestQuantization(unittest.TestCase): - """Quantization unit test stubs""" - - _configuration = 
configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_reasoning_effort.py b/launch/api_client/test/test_models/test_reasoning_effort.py deleted file mode 100644 index 7fe410e0..00000000 --- a/launch/api_client/test/test_models/test_reasoning_effort.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.reasoning_effort import ReasoningEffort - - -class TestReasoningEffort(unittest.TestCase): - """ReasoningEffort unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_request_schema.py b/launch/api_client/test/test_models/test_request_schema.py deleted file mode 100644 index 9dc3c17a..00000000 --- a/launch/api_client/test/test_models/test_request_schema.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.request_schema import RequestSchema - - -class TestRequestSchema(unittest.TestCase): - """RequestSchema unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_response_format_json_object.py 
b/launch/api_client/test/test_models/test_response_format_json_object.py deleted file mode 100644 index 23740fb2..00000000 --- a/launch/api_client/test/test_models/test_response_format_json_object.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.response_format_json_object import ( - ResponseFormatJsonObject, -) - - -class TestResponseFormatJsonObject(unittest.TestCase): - """ResponseFormatJsonObject unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_response_format_json_schema.py b/launch/api_client/test/test_models/test_response_format_json_schema.py deleted file mode 100644 index 227b1dbd..00000000 --- a/launch/api_client/test/test_models/test_response_format_json_schema.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.response_format_json_schema import ( - ResponseFormatJsonSchema, -) - - -class TestResponseFormatJsonSchema(unittest.TestCase): - """ResponseFormatJsonSchema unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_response_format_json_schema_schema.py 
b/launch/api_client/test/test_models/test_response_format_json_schema_schema.py deleted file mode 100644 index 17d3d1c9..00000000 --- a/launch/api_client/test/test_models/test_response_format_json_schema_schema.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.response_format_json_schema_schema import ( - ResponseFormatJsonSchemaSchema, -) - - -class TestResponseFormatJsonSchemaSchema(unittest.TestCase): - """ResponseFormatJsonSchemaSchema unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_response_format_text.py b/launch/api_client/test/test_models/test_response_format_text.py deleted file mode 100644 index e864dd69..00000000 --- a/launch/api_client/test/test_models/test_response_format_text.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.response_format_text import ResponseFormatText - - -class TestResponseFormatText(unittest.TestCase): - """ResponseFormatText unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_response_modalities.py b/launch/api_client/test/test_models/test_response_modalities.py deleted file 
mode 100644 index 5a279ccd..00000000 --- a/launch/api_client/test/test_models/test_response_modalities.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.response_modalities import ResponseModalities - - -class TestResponseModalities(unittest.TestCase): - """ResponseModalities unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_response_schema.py b/launch/api_client/test/test_models/test_response_schema.py deleted file mode 100644 index dcb01845..00000000 --- a/launch/api_client/test/test_models/test_response_schema.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.response_schema import ResponseSchema - - -class TestResponseSchema(unittest.TestCase): - """ResponseSchema unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_restart_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_restart_model_endpoint_v1_response.py deleted file mode 100644 index 0ca0631c..00000000 --- a/launch/api_client/test/test_models/test_restart_model_endpoint_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - 
-""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.restart_model_endpoint_v1_response import ( - RestartModelEndpointV1Response, -) - - -class TestRestartModelEndpointV1Response(unittest.TestCase): - """RestartModelEndpointV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_runnable_image_flavor.py b/launch/api_client/test/test_models/test_runnable_image_flavor.py deleted file mode 100644 index e6dd8db1..00000000 --- a/launch/api_client/test/test_models/test_runnable_image_flavor.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.runnable_image_flavor import RunnableImageFlavor - - -class TestRunnableImageFlavor(unittest.TestCase): - """RunnableImageFlavor unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_service_tier.py b/launch/api_client/test/test_models/test_service_tier.py deleted file mode 100644 index 30ce36be..00000000 --- a/launch/api_client/test/test_models/test_service_tier.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) 
# noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.service_tier import ServiceTier - - -class TestServiceTier(unittest.TestCase): - """ServiceTier unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_stop_configuration.py b/launch/api_client/test/test_models/test_stop_configuration.py deleted file mode 100644 index 416271d3..00000000 --- a/launch/api_client/test/test_models/test_stop_configuration.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.stop_configuration import StopConfiguration - - -class TestStopConfiguration(unittest.TestCase): - """StopConfiguration unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_stop_configuration1.py b/launch/api_client/test/test_models/test_stop_configuration1.py deleted file mode 100644 index a8fe69ec..00000000 --- a/launch/api_client/test/test_models/test_stop_configuration1.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration 
-from launch.api_client.model.stop_configuration1 import StopConfiguration1 - - -class TestStopConfiguration1(unittest.TestCase): - """StopConfiguration1 unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_stream_error.py b/launch/api_client/test/test_models/test_stream_error.py deleted file mode 100644 index 259edbc3..00000000 --- a/launch/api_client/test/test_models/test_stream_error.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.stream_error import StreamError - - -class TestStreamError(unittest.TestCase): - """StreamError unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_stream_error_content.py b/launch/api_client/test/test_models/test_stream_error_content.py deleted file mode 100644 index 1754e3dc..00000000 --- a/launch/api_client/test/test_models/test_stream_error_content.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.stream_error_content import StreamErrorContent - - -class TestStreamErrorContent(unittest.TestCase): - """StreamErrorContent unit test stubs""" - - _configuration = 
configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_streaming_enhanced_runnable_image_flavor.py b/launch/api_client/test/test_models/test_streaming_enhanced_runnable_image_flavor.py deleted file mode 100644 index 9382521e..00000000 --- a/launch/api_client/test/test_models/test_streaming_enhanced_runnable_image_flavor.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.streaming_enhanced_runnable_image_flavor import ( - StreamingEnhancedRunnableImageFlavor, -) - - -class TestStreamingEnhancedRunnableImageFlavor(unittest.TestCase): - """StreamingEnhancedRunnableImageFlavor unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_request.py b/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_request.py deleted file mode 100644 index 453920ec..00000000 --- a/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.sync_endpoint_predict_v1_request import ( - SyncEndpointPredictV1Request, -) - - -class TestSyncEndpointPredictV1Request(unittest.TestCase): - 
"""SyncEndpointPredictV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_response.py b/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_response.py deleted file mode 100644 index 761dbaf6..00000000 --- a/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.sync_endpoint_predict_v1_response import ( - SyncEndpointPredictV1Response, -) - - -class TestSyncEndpointPredictV1Response(unittest.TestCase): - """SyncEndpointPredictV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_task_status.py b/launch/api_client/test/test_models/test_task_status.py deleted file mode 100644 index b028e01b..00000000 --- a/launch/api_client/test/test_models/test_task_status.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.task_status import TaskStatus - - -class TestTaskStatus(unittest.TestCase): - """TaskStatus unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == 
"__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_tensorflow_framework.py b/launch/api_client/test/test_models/test_tensorflow_framework.py deleted file mode 100644 index e0985cc6..00000000 --- a/launch/api_client/test/test_models/test_tensorflow_framework.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.tensorflow_framework import TensorflowFramework - - -class TestTensorflowFramework(unittest.TestCase): - """TensorflowFramework unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_token_output.py b/launch/api_client/test/test_models/test_token_output.py deleted file mode 100644 index ad1d67af..00000000 --- a/launch/api_client/test/test_models/test_token_output.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.token_output import TokenOutput - - -class TestTokenOutput(unittest.TestCase): - """TokenOutput unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_tool_config.py b/launch/api_client/test/test_models/test_tool_config.py deleted file mode 100644 index 5502838c..00000000 --- 
a/launch/api_client/test/test_models/test_tool_config.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.tool_config import ToolConfig - - -class TestToolConfig(unittest.TestCase): - """ToolConfig unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_top_logprob.py b/launch/api_client/test/test_models/test_top_logprob.py deleted file mode 100644 index 6a007805..00000000 --- a/launch/api_client/test/test_models/test_top_logprob.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.top_logprob import TopLogprob - - -class TestTopLogprob(unittest.TestCase): - """TopLogprob unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_triton_enhanced_runnable_image_flavor.py b/launch/api_client/test/test_models/test_triton_enhanced_runnable_image_flavor.py deleted file mode 100644 index dacfe32e..00000000 --- a/launch/api_client/test/test_models/test_triton_enhanced_runnable_image_flavor.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.triton_enhanced_runnable_image_flavor import ( - TritonEnhancedRunnableImageFlavor, -) - - -class TestTritonEnhancedRunnableImageFlavor(unittest.TestCase): - """TritonEnhancedRunnableImageFlavor unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_batch_completions_v2_request.py b/launch/api_client/test/test_models/test_update_batch_completions_v2_request.py deleted file mode 100644 index e2a443f3..00000000 --- a/launch/api_client/test/test_models/test_update_batch_completions_v2_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_batch_completions_v2_request import ( - UpdateBatchCompletionsV2Request, -) - - -class TestUpdateBatchCompletionsV2Request(unittest.TestCase): - """UpdateBatchCompletionsV2Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_batch_completions_v2_response.py b/launch/api_client/test/test_models/test_update_batch_completions_v2_response.py deleted file mode 100644 index 65f21948..00000000 --- a/launch/api_client/test/test_models/test_update_batch_completions_v2_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch 
- - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_batch_completions_v2_response import ( - UpdateBatchCompletionsV2Response, -) - - -class TestUpdateBatchCompletionsV2Response(unittest.TestCase): - """UpdateBatchCompletionsV2Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_batch_job_v1_request.py b/launch/api_client/test/test_models/test_update_batch_job_v1_request.py deleted file mode 100644 index 9bcdc368..00000000 --- a/launch/api_client/test/test_models/test_update_batch_job_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_batch_job_v1_request import ( - UpdateBatchJobV1Request, -) - - -class TestUpdateBatchJobV1Request(unittest.TestCase): - """UpdateBatchJobV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_batch_job_v1_response.py b/launch/api_client/test/test_models/test_update_batch_job_v1_response.py deleted file mode 100644 index c445d59a..00000000 --- a/launch/api_client/test/test_models/test_update_batch_job_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description 
provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_batch_job_v1_response import ( - UpdateBatchJobV1Response, -) - - -class TestUpdateBatchJobV1Response(unittest.TestCase): - """UpdateBatchJobV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_deep_speed_model_endpoint_request.py b/launch/api_client/test/test_models/test_update_deep_speed_model_endpoint_request.py deleted file mode 100644 index ba1ac2f9..00000000 --- a/launch/api_client/test/test_models/test_update_deep_speed_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_deep_speed_model_endpoint_request import ( - UpdateDeepSpeedModelEndpointRequest, -) - - -class TestUpdateDeepSpeedModelEndpointRequest(unittest.TestCase): - """UpdateDeepSpeedModelEndpointRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_request.py b/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_request.py deleted file mode 100644 index a42c685f..00000000 --- a/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_request.py +++ /dev/null 
@@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_docker_image_batch_job_v1_request import ( - UpdateDockerImageBatchJobV1Request, -) - - -class TestUpdateDockerImageBatchJobV1Request(unittest.TestCase): - """UpdateDockerImageBatchJobV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_response.py b/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_response.py deleted file mode 100644 index 93042b86..00000000 --- a/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_docker_image_batch_job_v1_response import ( - UpdateDockerImageBatchJobV1Response, -) - - -class TestUpdateDockerImageBatchJobV1Response(unittest.TestCase): - """UpdateDockerImageBatchJobV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_request.py b/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_request.py deleted file mode 100644 index 
d26a95ab..00000000 --- a/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_llm_model_endpoint_v1_request import ( - UpdateLLMModelEndpointV1Request, -) - - -class TestUpdateLLMModelEndpointV1Request(unittest.TestCase): - """UpdateLLMModelEndpointV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_response.py deleted file mode 100644 index fcb9a57d..00000000 --- a/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_llm_model_endpoint_v1_response import ( - UpdateLLMModelEndpointV1Response, -) - - -class TestUpdateLLMModelEndpointV1Response(unittest.TestCase): - """UpdateLLMModelEndpointV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_model_endpoint_v1_request.py 
b/launch/api_client/test/test_models/test_update_model_endpoint_v1_request.py deleted file mode 100644 index 24ce845f..00000000 --- a/launch/api_client/test/test_models/test_update_model_endpoint_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_model_endpoint_v1_request import ( - UpdateModelEndpointV1Request, -) - - -class TestUpdateModelEndpointV1Request(unittest.TestCase): - """UpdateModelEndpointV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_update_model_endpoint_v1_response.py deleted file mode 100644 index 36a54c43..00000000 --- a/launch/api_client/test/test_models/test_update_model_endpoint_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_model_endpoint_v1_response import ( - UpdateModelEndpointV1Response, -) - - -class TestUpdateModelEndpointV1Response(unittest.TestCase): - """UpdateModelEndpointV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git 
a/launch/api_client/test/test_models/test_update_sg_lang_model_endpoint_request.py b/launch/api_client/test/test_models/test_update_sg_lang_model_endpoint_request.py deleted file mode 100644 index 8e0e604c..00000000 --- a/launch/api_client/test/test_models/test_update_sg_lang_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_sg_lang_model_endpoint_request import ( - UpdateSGLangModelEndpointRequest, -) - - -class TestUpdateSGLangModelEndpointRequest(unittest.TestCase): - """UpdateSGLangModelEndpointRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_text_generation_inference_model_endpoint_request.py b/launch/api_client/test/test_models/test_update_text_generation_inference_model_endpoint_request.py deleted file mode 100644 index 7a87841b..00000000 --- a/launch/api_client/test/test_models/test_update_text_generation_inference_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_text_generation_inference_model_endpoint_request import ( - UpdateTextGenerationInferenceModelEndpointRequest, -) - - -class 
TestUpdateTextGenerationInferenceModelEndpointRequest(unittest.TestCase): - """UpdateTextGenerationInferenceModelEndpointRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_trigger_v1_request.py b/launch/api_client/test/test_models/test_update_trigger_v1_request.py deleted file mode 100644 index e0b8e004..00000000 --- a/launch/api_client/test/test_models/test_update_trigger_v1_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_trigger_v1_request import ( - UpdateTriggerV1Request, -) - - -class TestUpdateTriggerV1Request(unittest.TestCase): - """UpdateTriggerV1Request unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_trigger_v1_response.py b/launch/api_client/test/test_models/test_update_trigger_v1_response.py deleted file mode 100644 index 253d37d2..00000000 --- a/launch/api_client/test/test_models/test_update_trigger_v1_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_trigger_v1_response import ( - UpdateTriggerV1Response, -) - - -class 
TestUpdateTriggerV1Response(unittest.TestCase): - """UpdateTriggerV1Response unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_update_vllm_model_endpoint_request.py b/launch/api_client/test/test_models/test_update_vllm_model_endpoint_request.py deleted file mode 100644 index 9b488dcd..00000000 --- a/launch/api_client/test/test_models/test_update_vllm_model_endpoint_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.update_vllm_model_endpoint_request import ( - UpdateVLLMModelEndpointRequest, -) - - -class TestUpdateVLLMModelEndpointRequest(unittest.TestCase): - """UpdateVLLMModelEndpointRequest unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_upload_file_response.py b/launch/api_client/test/test_models/test_upload_file_response.py deleted file mode 100644 index e734761a..00000000 --- a/launch/api_client/test/test_models/test_upload_file_response.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.upload_file_response import UploadFileResponse - - -class TestUploadFileResponse(unittest.TestCase): - 
"""UploadFileResponse unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_url_citation.py b/launch/api_client/test/test_models/test_url_citation.py deleted file mode 100644 index 4e42efe6..00000000 --- a/launch/api_client/test/test_models/test_url_citation.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.url_citation import UrlCitation - - -class TestUrlCitation(unittest.TestCase): - """UrlCitation unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_user_location.py b/launch/api_client/test/test_models/test_user_location.py deleted file mode 100644 index 5c6048ea..00000000 --- a/launch/api_client/test/test_models/test_user_location.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.user_location import UserLocation - - -class TestUserLocation(unittest.TestCase): - """UserLocation unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_validation_error.py 
b/launch/api_client/test/test_models/test_validation_error.py deleted file mode 100644 index 3147cb65..00000000 --- a/launch/api_client/test/test_models/test_validation_error.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.validation_error import ValidationError - - -class TestValidationError(unittest.TestCase): - """ValidationError unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_voice_ids_shared.py b/launch/api_client/test/test_models/test_voice_ids_shared.py deleted file mode 100644 index dfe31382..00000000 --- a/launch/api_client/test/test_models/test_voice_ids_shared.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.voice_ids_shared import VoiceIdsShared - - -class TestVoiceIdsShared(unittest.TestCase): - """VoiceIdsShared unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_web_search_context_size.py b/launch/api_client/test/test_models/test_web_search_context_size.py deleted file mode 100644 index 457052f1..00000000 --- a/launch/api_client/test/test_models/test_web_search_context_size.py +++ /dev/null @@ 
-1,28 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.web_search_context_size import ( - WebSearchContextSize, -) - - -class TestWebSearchContextSize(unittest.TestCase): - """WebSearchContextSize unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_web_search_location.py b/launch/api_client/test/test_models/test_web_search_location.py deleted file mode 100644 index 7cee87fd..00000000 --- a/launch/api_client/test/test_models/test_web_search_location.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.web_search_location import WebSearchLocation - - -class TestWebSearchLocation(unittest.TestCase): - """WebSearchLocation unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_web_search_options.py b/launch/api_client/test/test_models/test_web_search_options.py deleted file mode 100644 index ad6020c3..00000000 --- a/launch/api_client/test/test_models/test_web_search_options.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # 
noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.web_search_options import WebSearchOptions - - -class TestWebSearchOptions(unittest.TestCase): - """WebSearchOptions unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/api_client/test/test_models/test_zip_artifact_flavor.py b/launch/api_client/test/test_models/test_zip_artifact_flavor.py deleted file mode 100644 index d301f34f..00000000 --- a/launch/api_client/test/test_models/test_zip_artifact_flavor.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import unittest - -import launch.api_client -from launch.api_client import configuration -from launch.api_client.model.zip_artifact_flavor import ZipArtifactFlavor - - -class TestZipArtifactFlavor(unittest.TestCase): - """ZipArtifactFlavor unit test stubs""" - - _configuration = configuration.Configuration() - - -if __name__ == "__main__": - unittest.main() diff --git a/launch/cli/__init__.py b/launch/cli/__init__.py deleted file mode 100644 index 3afeea93..00000000 --- a/launch/cli/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -""" -.. 
click:: launch.cli.bin:entry_point - :prog: scale-launch - :nested: full -""" diff --git a/launch/cli/batch_jobs.py b/launch/cli/batch_jobs.py deleted file mode 100644 index ba9ef48f..00000000 --- a/launch/cli/batch_jobs.py +++ /dev/null @@ -1,31 +0,0 @@ -from datetime import timedelta - -import click - -from launch.cli.client import init_client -from launch.cli.console import pretty_print, spinner - - -@click.group("batch-jobs") -@click.pass_context -def batch_jobs(ctx: click.Context): - """ - Batch Jobs is a wrapper around batch jobs in Scale Launch - """ - - -@batch_jobs.command("get") -@click.argument("job_id") -@click.pass_context -def get_bundle(ctx: click.Context, job_id: str): - """Print bundle info""" - client = init_client(ctx) - - with spinner(f"Fetching batch job '{job_id}'"): - batch_job = client.get_batch_async_response(job_id) - - pretty_print(f"status: {batch_job['status']}") - pretty_print(f"result: {batch_job['result']}") - pretty_print(f"duration: {timedelta(seconds=batch_job['duration'])}") - pretty_print(f"# tasks pending: {batch_job['num_tasks_pending']}") - pretty_print(f"# tasks completed: {batch_job['num_tasks_completed']}") diff --git a/launch/cli/bin.py b/launch/cli/bin.py deleted file mode 100644 index 26b5ce41..00000000 --- a/launch/cli/bin.py +++ /dev/null @@ -1,44 +0,0 @@ -import click - -from launch.cli.batch_jobs import batch_jobs -from launch.cli.bundles import bundles -from launch.cli.config import ContextObject, config, set_config -from launch.cli.endpoints import endpoints -from launch.cli.tasks import tasks - - -class RichGroup(click.Group): - def format_help(self, ctx, formatter): - formatter.width = 118 - formatter.write( - """ - This is the command line interface (CLI) package for Scale Launch. 
- - ██╗ █████╗ ██╗ ██╗███╗ ██╗ ██████╗██╗ ██╗ - ██║ ██╔══██╗██║ ██║████╗ ██║██╔════╝██║ ██║ - ██║ ███████║██║ ██║██╔██╗ ██║██║ ███████║ - ██║ ██╔══██║██║ ██║██║╚██╗██║██║ ██╔══██║ - ███████╗██║ ██║╚██████╔╝██║ ╚████║╚██████╗██║ ██║ - ╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚═╝ ╚═╝ - -""" - ) - super().format_help(ctx, formatter) - - -@click.group("cli", cls=RichGroup) -@click.pass_context -def entry_point(ctx, **kwargs): - ctx.obj = ContextObject().load() - if ctx.obj.api_key is None: - ctx.invoke(set_config) - - -entry_point.add_command(batch_jobs) # type: ignore -entry_point.add_command(bundles) # type: ignore -entry_point.add_command(config) # type: ignore -entry_point.add_command(endpoints) # type: ignore -entry_point.add_command(tasks) # type: ignore - -if __name__ == "__main__": - entry_point() # pylint: disable=no-value-for-parameter diff --git a/launch/cli/bundles.py b/launch/cli/bundles.py deleted file mode 100644 index adce1287..00000000 --- a/launch/cli/bundles.py +++ /dev/null @@ -1,73 +0,0 @@ -import re -from typing import Optional - -import click -from rich.syntax import Syntax -from rich.table import Column, Table - -from launch.cli.client import init_client -from launch.cli.console import pretty_print, spinner - - -@click.group("bundles") -@click.pass_context -def bundles(ctx: click.Context): - """ - Bundles is a wrapper around model bundles in Scale Launch - """ - - -@bundles.command("list") -@click.option("--name", "-n", help="Regex to use to filter by name", default=None) -@click.pass_context -def list_bundles(ctx: click.Context, name: Optional[str]): - """ - List all of your Bundles - """ - client = init_client(ctx) - - table = Table( - Column("Bundle Id", overflow="fold", min_width=24), - "Bundle name", - "Location", - "Packaging type", - title="Bundles", - title_justify="left", - ) - with spinner("Fetching bundles"): - model_bundles = client.list_model_bundles() - for model_bundle in model_bundles: - if name is None or re.match(name, 
model_bundle.name): - table.add_row( - model_bundle.id, - model_bundle.name, - model_bundle.location, - model_bundle.packaging_type, - ) - pretty_print(table) - - -@bundles.command("get") -@click.argument("bundle_name") -@click.pass_context -def get_bundle(ctx: click.Context, bundle_name: str): - """Print bundle info""" - client = init_client(ctx) - - with spinner(f"Fetching bundle '{bundle_name}'"): - model_bundle = client.get_model_bundle(bundle_name) - - pretty_print(f"bundle_id: {model_bundle.id}") - pretty_print(f"bundle_name: {model_bundle.name}") - pretty_print(f"location: {model_bundle.location}") - pretty_print(f"packaging_type: {model_bundle.packaging_type}") - pretty_print(f"env_params: {model_bundle.env_params}") - pretty_print(f"requirements: {model_bundle.requirements}") - pretty_print(f"app_config: {model_bundle.app_config}") - - pretty_print("metadata:") - for meta_name, meta_value in model_bundle.metadata.items(): - # TODO print non-code metadata differently - pretty_print(f"{meta_name}:", style="yellow") - syntax = Syntax(meta_value, "python") - pretty_print(syntax) diff --git a/launch/cli/client.py b/launch/cli/client.py deleted file mode 100644 index 5bc8d28c..00000000 --- a/launch/cli/client.py +++ /dev/null @@ -1,16 +0,0 @@ -import functools - -import click - -import launch - - -# TODO: Does it make sense to instantiate the client in the context? 
-@functools.lru_cache() -def init_client(ctx: click.Context): - client = launch.LaunchClient( - api_key=ctx.obj.api_key, - endpoint=ctx.obj.gateway_endpoint, - self_hosted=ctx.obj.self_hosted, - ) - return client diff --git a/launch/cli/config.py b/launch/cli/config.py deleted file mode 100644 index 67ec2707..00000000 --- a/launch/cli/config.py +++ /dev/null @@ -1,82 +0,0 @@ -import json -import os -from dataclasses import asdict, dataclass -from typing import Optional - -import click -import questionary as q -from rich.console import Console -from rich.table import Table - - -@dataclass -class ContextObject: - self_hosted: Optional[bool] = False - gateway_endpoint: Optional[str] = None - api_key: Optional[str] = None - - @staticmethod - def config_path(): - config_dir = click.get_app_dir("launch") - if not os.path.exists(config_dir): - os.makedirs(config_dir) - return os.path.join(config_dir, "config.json") - - def load(self): - try: - with open(self.config_path(), "r", encoding="utf-8") as f: - new_items = json.load(f) - for key, value in new_items.items(): - if hasattr(self, key): - setattr(self, key, value) - except FileNotFoundError: - pass - - return self - - def save(self): - with open(self.config_path(), "w", encoding="utf-8") as f: - json.dump(asdict(self), f, indent=4) - - -@click.group("config") -@click.pass_context -def config(ctx: click.Context): - """ - Config is a wrapper around getting and setting your API key and other configuration options - """ - - -@config.command("get") -@click.pass_context -def get_config(ctx: click.Context): - table = Table( - "Self-Hosted", - "API Key", - "Gateway Endpoint", - ) - - table.add_row(str(ctx.obj.self_hosted), ctx.obj.api_key, ctx.obj.gateway_endpoint) - console = Console() - console.print(table) - - -@config.command("set") -@click.pass_context -def set_config(ctx: click.Context): - ctx.obj.api_key = q.text( - message="Your Scale API Key?", - default=ctx.obj.api_key or "", - validate=lambda x: isinstance(x, str) 
and len(x) > 16, # Arbitrary length right now - ).ask() - ctx.obj.self_hosted = q.confirm( - message="Is your installation of Launch self-hosted?", - default=ctx.obj.self_hosted, - ).ask() - if ctx.obj.self_hosted: - ctx.obj.gateway_endpoint = q.text( - message="Your Gateway Endpoint?", - default=ctx.obj.gateway_endpoint or "", - ).ask() - - ctx.obj.save() diff --git a/launch/cli/console.py b/launch/cli/console.py deleted file mode 100644 index 8a86b8ae..00000000 --- a/launch/cli/console.py +++ /dev/null @@ -1,23 +0,0 @@ -from typing import Any, Optional - -import rich -from rich.console import Console as RichConsole - -rich_console = RichConsole(highlight=False) - - -def spinner(message: str): - """ - Shows a spinner until the with scope exits. - """ - return rich_console.status(f"[bold green]{message}") - - -def pretty_print(message: Any, style: Optional[str] = None, markup: Optional[bool] = None) -> None: - """ - Pretty prints to the console. - """ - if style or markup is not None: - rich_console.print(message, style=style, markup=markup) - else: - rich.print(message) diff --git a/launch/cli/endpoints.py b/launch/cli/endpoints.py deleted file mode 100644 index 57fa6181..00000000 --- a/launch/cli/endpoints.py +++ /dev/null @@ -1,259 +0,0 @@ -import re -from pprint import pformat -from typing import NamedTuple, Optional - -import click -import questionary as q -from rich.table import Table -from typing_extensions import Literal - -from launch.cli.client import init_client -from launch.cli.console import pretty_print, spinner -from launch.hooks import PostInferenceHooks - - -@click.group("endpoints") -@click.pass_context -def endpoints(ctx: click.Context): - """Endpoints is a wrapper around model endpoints in Scale Launch""" - - -class EndpointRow(NamedTuple): - id: str - endpoint_name: str - bundle_name: str - status: Literal["READY", "UPDATE_PENDING", "UPDATE_IN_PROGRESS", "UPDATE_FAILED", "DELETE_IN_PROGRESS"] - endpoint_type: Literal["async", "sync"] - 
min_workers: str # rich.table requires all strings - max_workers: str - available_workers: str - unavailable_workers: str - num_gpus: str - metadata: str - - -@click.pass_context -@endpoints.command("list") -@click.option("--name", "-n", help="Regex to use to filter by name", default=None) -@click.option("-o", "--orderby", required=False, type=click.Choice(EndpointRow._fields), help="How to order the table") -@click.option( - "-d", - "--descending", - required=False, - is_flag=True, - type=bool, - default=False, - help="Whether to sort in descending order", -) -@click.pass_context -def list_endpoints(ctx: click.Context, name: Optional[str], orderby, descending: bool): - """List all of your Endpoints""" - client = init_client(ctx) - - table = Table( - "Endpoint ID", - "Endpoint name", - "Bundle name", - "Status", - "Endpoint\ntype", - "Min\nWorkers", - "Max\nWorkers", - "Available\nWorkers", - "Unavailable\nWorkers", - "Num\nGPUs", - "Metadata", - title="Endpoints", - title_justify="left", - ) - - with spinner("Fetching model endpoints"): - model_endpoints = client.list_model_endpoints() - endpoint_rows = [] - for servable_endpoint in model_endpoints: - if name is None or re.match(name, servable_endpoint.model_endpoint.name): - row = EndpointRow( - servable_endpoint.model_endpoint.id, - servable_endpoint.model_endpoint.name, - servable_endpoint.model_endpoint.bundle_name, - servable_endpoint.model_endpoint.status, - servable_endpoint.model_endpoint.endpoint_type, - str((servable_endpoint.model_endpoint.deployment_state or {}).get("min_workers", "")), - str((servable_endpoint.model_endpoint.deployment_state or {}).get("max_workers", "")), - str((servable_endpoint.model_endpoint.deployment_state or {}).get("available_workers", "")), - str((servable_endpoint.model_endpoint.deployment_state or {}).get("unavailable_workers", "")), - str((servable_endpoint.model_endpoint.resource_state or {}).get("gpus", "0")), - str(servable_endpoint.model_endpoint.metadata or {}), - ) - 
endpoint_rows.append(row) - - if orderby is not None: - endpoint_rows = sorted(endpoint_rows, key=lambda x: getattr(x, orderby), reverse=descending) - - for row in endpoint_rows: - table.add_row(*row) - - pretty_print(table) - - -@endpoints.command("delete") -@click.argument("endpoint_name") -@click.pass_context -def delete_endpoint(ctx: click.Context, endpoint_name: str): - """Delete a model endpoint""" - client = init_client(ctx) - - with spinner(f"Deleting model endpoint '{endpoint_name}'"): - res = client.delete_model_endpoint(endpoint_name) - - pretty_print(res) - - -@endpoints.command("creation-logs") -@click.argument("endpoint_name") -@click.pass_context -def read_endpoint_creation_logs(ctx: click.Context, endpoint_name: str): - """Reads the creation logs for an endpoint""" - client = init_client(ctx) - - with spinner(f"Fetching creation logs for endpoint '{endpoint_name}'"): - res = client.read_endpoint_creation_logs(endpoint_name) - - # rich fails to render the text because it's already formatted - print(res) - - -@endpoints.command("get") -@click.argument("endpoint_name") -@click.pass_context -def get_endpoint(ctx: click.Context, endpoint_name: str): - """Print bundle info""" - client = init_client(ctx) - - with spinner(f"Fetching endpoint '{endpoint_name}'"): - model_endpoint = client.get_model_endpoint(endpoint_name).model_endpoint - - pretty_print(f"endpoint_id: {model_endpoint.id}") - pretty_print(f"endpoint_name: {model_endpoint.name}") - pretty_print(f"bundle_name: {model_endpoint.bundle_name}") - pretty_print(f"status: {model_endpoint.status}") - pretty_print(f"resource_state: {model_endpoint.resource_state}") - pretty_print(f"deployment_state: {model_endpoint.deployment_state}") - pretty_print(f"metadata: {model_endpoint.metadata}") - pretty_print(f"endpoint_type: {model_endpoint.endpoint_type}") - pretty_print(f"configs: {model_endpoint.configs}") - pretty_print(f"destination: {model_endpoint.destination}") - pretty_print(f"post-inference hooks: 
{model_endpoint.post_inference_hooks}") - pretty_print(f"default callback url: {model_endpoint.default_callback_url}") - - -def _validate_int(val: str) -> int: - try: - int(val) - return True - except ValueError: - pass - return False - - -def _dict_not_none_or_empty(**kwargs) -> dict: - return {k: v for k, v in kwargs.items() if v is not None and v != "" and v != []} - - -@endpoints.command("edit") -@click.argument("endpoint_name") -@click.pass_context -def edit_endpoint(ctx: click.Context, endpoint_name: str): - """Edit an endpoint""" - client = init_client(ctx) - - with spinner(f"Fetching endpoint '{endpoint_name}'"): - model_endpoint = client.get_model_endpoint(endpoint_name).model_endpoint - - model_bundles = client.list_model_bundles() - model_bundle_choices = [ - q.Choice( - f"Current bundle ({model_endpoint.bundle_name})", - value="", - checked=True, - ) - ] - for bundle in model_bundles: - model_bundle_choices.append(q.Choice(title=pformat(bundle), value=bundle)) - - post_inference_hooks_choices = [] - post_inference_hooks = model_endpoint.post_inference_hooks or [] - for hook in PostInferenceHooks: - value = hook.value # type: ignore - post_inference_hooks_choices.append(q.Choice(title=value, checked=value in post_inference_hooks)) - - if model_endpoint.status != "READY": - pretty_print(f"Endpoint '{endpoint_name}' is not ready. 
Please wait for it to be ready " "before editing.") - return - - model_bundle = q.select("Model bundle: ", choices=model_bundle_choices).ask() - resource_state = _dict_not_none_or_empty(**(model_endpoint.resource_state or {})) - deployment_state = _dict_not_none_or_empty(**(model_endpoint.deployment_state or {})) - cpus = q.text("Cpus: ", default=resource_state.get("cpus", "")).ask() - gpu_raw = q.text( - "Gpus: ", - default=str(resource_state.get("gpus", "")), - validate=_validate_int, - ).ask() - gpus = int(gpu_raw) - memory = q.text("Memory: ", default=resource_state.get("memory", "")).ask() - storage = q.text("Storage (optional): ", default=resource_state.get("storage", "")).ask() - gpu_type_prompt = "Gpu type (optional): " if gpus == 0 else "Gpu type: " - gpu_type = q.select( - gpu_type_prompt, - choices=[ - "", - "nvidia-hopper-h100-1g20g", - "nvidia-hopper-h100-3g40g", - "nvidia-hopper-h100", - "nvidia-tesla-t4", - "nvidia-ampere-a10", - "nvidia-ampere-a100", - ], - ).ask() - min_workers = q.text( - "Min workers: ", - default=str(deployment_state.get("min_workers", "")), - validate=_validate_int, - ).ask() - min_workers = int(min_workers) - max_workers = q.text( - "Max workers: ", - default=str(deployment_state.get("max_workers", "")), - validate=_validate_int, - ).ask() - max_workers = int(max_workers) - per_worker = q.text( - "Per worker: ", - default=str(deployment_state.get("per_worker", "")), - validate=_validate_int, - ).ask() - per_worker = int(per_worker) - post_inference_hooks = q.checkbox("Post-inference hooks: ", choices=post_inference_hooks_choices).ask() - default_callback_url = q.text( - "Default callback url (optional): ", - default=model_endpoint.default_callback_url or "", - ).ask() - - kwargs = _dict_not_none_or_empty( - model_bundle=model_bundle, - cpus=cpus, - memory=memory, - storage=storage, - gpus=gpus, - min_workers=min_workers, - max_workers=max_workers, - per_worker=per_worker, - gpu_type=gpu_type, - 
post_inference_hooks=post_inference_hooks, - default_callback_url=default_callback_url, - ) - - with spinner(f"Editing endpoint '{endpoint_name}'"): - # TODO: Print out a nice error message if the user passes in arguments - # that fail server-side validation. - client.edit_model_endpoint(model_endpoint=model_endpoint, **kwargs) diff --git a/launch/cli/tasks.py b/launch/cli/tasks.py deleted file mode 100644 index 19e8a12c..00000000 --- a/launch/cli/tasks.py +++ /dev/null @@ -1,55 +0,0 @@ -import json -from typing import Optional - -import click - -from launch.cli.client import init_client -from launch.model_endpoint import EndpointRequest - - -@click.group("tasks") -@click.pass_context -def tasks(ctx: click.Context): - """Tasks is a wrapper around sending requests to endpoints""" - - -@tasks.command("send") -@click.argument("endpoint_name") -@click.option("-r", "--request", help="input request as a json string") -@click.option("-f", "--json-file", help="json file containing request") -@click.pass_context -def send( - ctx: click.Context, - endpoint_name: str, - request: Optional[str], - json_file: Optional[str], -): - """Sends request to launch endpoint""" - - # Only allowed one kind of input - assert (request is not None) ^ (json_file is not None), "Please supply EITHER --request OR --json-file" - - if request is not None: - json_input = json.loads(request) - elif json_file is not None: - with open(json_file, "rb") as f: - json_input = json.load(f) - - client = init_client(ctx) - - model_endpoint = client.get_model_endpoint(endpoint_name) - print(f"Sending request to {endpoint_name} at {ctx.obj.gateway_endpoint}") - if model_endpoint.status() is None: - raise ValueError(f"Unable to find endpoint {endpoint_name}") - - if model_endpoint.status() != "READY": - print(f"Warning: endpoint is not ready get: {model_endpoint.status()}") - else: - kwargs = {"request": EndpointRequest(args=json_input, return_pickled=False)} - if model_endpoint.model_endpoint.endpoint_type == 
"async": - future = model_endpoint.predict(**kwargs) - response = future.get() # blocks until completion - else: - response = model_endpoint.predict(**kwargs) - - print(response) diff --git a/launch/client.py b/launch/client.py deleted file mode 100644 index df6f3d6f..00000000 --- a/launch/client.py +++ /dev/null @@ -1,3342 +0,0 @@ -import inspect # pylint: disable=C0302 -import json -import logging -import os -import shutil -import tempfile -from io import StringIO -from typing import ( - Any, - Callable, - Dict, - Iterable, - List, - Optional, - Type, - TypeVar, - Union, -) -from zipfile import ZipFile - -import cloudpickle -import requests -import sseclient -import yaml -from deprecation import deprecated -from frozendict import frozendict -from pydantic import BaseModel -from typing_extensions import Literal - -from launch.api_client import ApiClient, Configuration -from launch.api_client.apis.tags.default_api import DefaultApi -from launch.api_client.model.body_upload_file_v1_files_post import ( - BodyUploadFileV1FilesPost, -) -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.clone_model_bundle_v1_request import ( - CloneModelBundleV1Request, -) -from launch.api_client.model.clone_model_bundle_v2_request import ( - CloneModelBundleV2Request, -) -from launch.api_client.model.cloudpickle_artifact_flavor import ( - CloudpickleArtifactFlavor, -) -from launch.api_client.model.completion_stream_v1_response import ( - CompletionStreamV1Response, -) -from launch.api_client.model.completion_sync_v1_request import ( - CompletionSyncV1Request, -) -from launch.api_client.model.completion_sync_v1_response import ( - CompletionSyncV1Response, -) -from launch.api_client.model.create_batch_job_v1_request import ( - CreateBatchJobV1Request, -) -from launch.api_client.model.create_docker_image_batch_job_bundle_v1_request import ( - CreateDockerImageBatchJobBundleV1Request, -) -from 
launch.api_client.model.create_docker_image_batch_job_v1_request import ( - CreateDockerImageBatchJobV1Request, -) -from launch.api_client.model.create_fine_tune_request import ( - CreateFineTuneRequest, -) -from launch.api_client.model.create_llm_model_endpoint_v1_request import ( - CreateLLMModelEndpointV1Request, -) -from launch.api_client.model.create_model_bundle_v1_request import ( - CreateModelBundleV1Request, -) -from launch.api_client.model.create_model_bundle_v2_request import ( - CreateModelBundleV2Request, -) -from launch.api_client.model.create_model_endpoint_v1_request import ( - CreateModelEndpointV1Request, -) -from launch.api_client.model.custom_framework import CustomFramework -from launch.api_client.model.endpoint_predict_v1_request import ( - EndpointPredictV1Request, -) -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.llm_inference_framework import ( - LLMInferenceFramework, -) -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.model_bundle_environment_params import ( - ModelBundleEnvironmentParams, -) -from launch.api_client.model.model_bundle_framework_type import ( - ModelBundleFrameworkType, -) -from launch.api_client.model.model_bundle_packaging_type import ( - ModelBundlePackagingType, -) -from launch.api_client.model.model_endpoint_type import ModelEndpointType -from launch.api_client.model.pytorch_framework import PytorchFramework -from launch.api_client.model.quantization import Quantization -from launch.api_client.model.runnable_image_flavor import RunnableImageFlavor -from launch.api_client.model.streaming_enhanced_runnable_image_flavor import ( - StreamingEnhancedRunnableImageFlavor, -) -from launch.api_client.model.tensorflow_framework import TensorflowFramework -from launch.api_client.model.triton_enhanced_runnable_image_flavor import ( - TritonEnhancedRunnableImageFlavor, -) -from launch.api_client.model.update_docker_image_batch_job_v1_request import ( - 
UpdateDockerImageBatchJobV1Request, -) -from launch.api_client.model.update_model_endpoint_v1_request import ( - UpdateModelEndpointV1Request, -) -from launch.api_client.model.zip_artifact_flavor import ZipArtifactFlavor -from launch.connection import Connection -from launch.constants import ( - BATCH_TASK_INPUT_SIGNED_URL_PATH, - DEFAULT_SCALE_ENDPOINT, - ENDPOINT_PATH, - MODEL_BUNDLE_SIGNED_URL_PATH, - SCALE_LAUNCH_V0_PATH, - SCALE_LAUNCH_V1_PATH, -) -from launch.docker_image_batch_job_bundle import ( - CreateDockerImageBatchJobBundleResponse, - DockerImageBatchJobBundleResponse, - ListDockerImageBatchJobBundleResponse, -) -from launch.file import ( - DeleteFileResponse, - GetFileContentResponse, - GetFileResponse, - ListFilesResponse, - UploadFileResponse, -) -from launch.find_packages import find_packages_from_imports, get_imports -from launch.fine_tune import ( - CancelFineTuneResponse, - CreateFineTuneResponse, - GetFineTuneEventsResponse, - GetFineTuneResponse, - ListFineTunesResponse, -) -from launch.hooks import PostInferenceHooks -from launch.make_batch_file import ( - make_batch_input_dict_file, - make_batch_input_file, -) -from launch.model import ModelDownloadResponse -from launch.model_bundle import ( - CreateModelBundleV2Response, - ListModelBundlesV2Response, - ModelBundle, - ModelBundleV2Response, -) -from launch.model_endpoint import ( - AsyncEndpoint, - Endpoint, - ModelEndpoint, - StreamingEndpoint, - SyncEndpoint, -) -from launch.pydantic_schemas import get_model_definitions -from launch.request_validation import validate_task_request - -DEFAULT_NETWORK_TIMEOUT_SEC = 120 -DEFAULT_LLM_COMPLETIONS_TIMEOUT = 300 - -logger = logging.getLogger(__name__) -logging.basicConfig() - -LaunchModel_T = TypeVar("LaunchModel_T") - - -def _model_bundle_to_name(model_bundle: Union[ModelBundle, str]) -> str: - if isinstance(model_bundle, ModelBundle): - return model_bundle.name - elif isinstance(model_bundle, str): - return model_bundle - else: - raise 
TypeError("model_bundle should be type ModelBundle or str") - - -def _model_bundle_to_id(model_bundle: Union[ModelBundle, str]) -> str: - if isinstance(model_bundle, ModelBundle): - if model_bundle.id is None: - raise ValueError( - "You need to pass in a ModelBundle that has an id, " - "i.e. one that has already been registered on the server" - ) - return model_bundle.id - elif isinstance(model_bundle, str): - return model_bundle - else: - raise TypeError("model_bundle should be type ModelBundle or str") - - -def _model_endpoint_to_name(model_endpoint: Union[ModelEndpoint, str]) -> str: - if isinstance(model_endpoint, ModelEndpoint): - return model_endpoint.name - elif isinstance(model_endpoint, str): - return model_endpoint - else: - raise TypeError("model_endpoint should be type ModelEndpoint or str") - - -def _add_app_config_to_bundle_create_payload(payload: Dict[str, Any], app_config: Optional[Union[Dict[str, Any], str]]): - """ - Edits a request payload (for creating a bundle) to include a (not serialized) app_config if it's - not None - """ - if isinstance(app_config, Dict): - payload["app_config"] = app_config - elif isinstance(app_config, str): - with open(app_config, "r") as f: # pylint: disable=unspecified-encoding - app_config_dict = yaml.safe_load(f) - payload["app_config"] = app_config_dict - - -def _get_model_bundle_framework( - pytorch_image_tag: Optional[str] = None, - tensorflow_version: Optional[str] = None, - custom_base_image_repository: Optional[str] = None, - custom_base_image_tag: Optional[str] = None, -): - if pytorch_image_tag is not None: - return PytorchFramework( - pytorch_image_tag=pytorch_image_tag, - framework_type=ModelBundleFrameworkType.PYTORCH, - ) - elif tensorflow_version is not None: - return TensorflowFramework( - tensorflow_version=tensorflow_version, - framework_type=ModelBundleFrameworkType.TENSORFLOW, - ) - elif custom_base_image_repository is not None and custom_base_image_tag is not None: - return CustomFramework( - 
image_repository=custom_base_image_repository, - image_tag=custom_base_image_tag, - framework_type=ModelBundleFrameworkType.CUSTOM_BASE_IMAGE, - ) - else: - raise ValueError( - "You must specify one of pytorch_image_tag, tensorflow_version, or " - "custom_base_image_repository and custom_base_image_tag" - ) - - -def dict_not_none(**kwargs): - return {k: v for k, v in kwargs.items() if v is not None} - - -class LaunchClient: - """Scale Launch Python Client.""" - - def __init__( - self, - api_key: str, - endpoint: Optional[str] = None, - self_hosted: bool = False, - use_path_with_custom_endpoint: bool = False, - ): - """ - Initializes a Scale Launch Client. - - Parameters: - api_key: Your Scale API key - endpoint: The Scale Launch Endpoint (this should not need to be changed) - self_hosted: True iff you are connecting to a self-hosted Scale Launch - use_path_with_custom_endpoint: True iff you are not using the default Scale Launch endpoint - but your endpoint has path routing (to SCALE_LAUNCH_VX_PATH) set up - """ - self.endpoint = endpoint or DEFAULT_SCALE_ENDPOINT - self.connection = Connection(api_key, self.endpoint + SCALE_LAUNCH_V0_PATH) - self.self_hosted = self_hosted - self.upload_bundle_fn: Optional[Callable[[str, str], None]] = None - self.upload_batch_csv_fn: Optional[Callable[[str, str], None]] = None - self.bundle_location_fn: Optional[Callable[[], str]] = None - self.batch_csv_location_fn: Optional[Callable[[], str]] = None - host = self.endpoint + SCALE_LAUNCH_V1_PATH if endpoint is None else self.endpoint - if use_path_with_custom_endpoint: - host = self.endpoint + SCALE_LAUNCH_V1_PATH - self.configuration = Configuration( - host=host, - discard_unknown_keys=True, - username=api_key, - password="", - ) - - def __repr__(self): - return f"LaunchClient(connection='{self.connection}')" - - def __eq__(self, other): - return self.connection == other.connection - - def register_upload_bundle_fn(self, upload_bundle_fn: Callable[[str, str], None]): - """ - For 
self-hosted mode only. Registers a function that handles model bundle upload. This - function is called as - - upload_bundle_fn(serialized_bundle, bundle_url) - - This function should directly write the contents of ``serialized_bundle`` as a - binary string into ``bundle_url``. - - See ``register_bundle_location_fn`` for more notes on the signature of ``upload_bundle_fn`` - - Parameters: - upload_bundle_fn: Function that takes in a serialized bundle (bytes type), - and uploads that bundle to an appropriate location. Only needed for self-hosted mode. - """ - self.upload_bundle_fn = upload_bundle_fn - - def register_upload_batch_csv_fn(self, upload_batch_csv_fn: Callable[[str, str], None]): - """ - For self-hosted mode only. Registers a function that handles batch text upload. This - function is called as - - upload_batch_csv_fn(csv_text, csv_url) - - This function should directly write the contents of ``csv_text`` as a text string into - ``csv_url``. - - Parameters: - upload_batch_csv_fn: Function that takes in a csv text (string type), - and uploads that bundle to an appropriate location. Only needed for self-hosted mode. - """ - self.upload_batch_csv_fn = upload_batch_csv_fn - - def register_bundle_location_fn(self, bundle_location_fn: Callable[[], str]): - """ - For self-hosted mode only. Registers a function that gives a location for a model bundle. - Should give different locations each time. This function is called as - ``bundle_location_fn()``, and should return a ``bundle_url`` that - ``register_upload_bundle_fn`` can take. - - Strictly, ``bundle_location_fn()`` does not need to return a ``str``. The only - requirement is that if ``bundle_location_fn`` returns a value of type ``T``, - then ``upload_bundle_fn()`` takes in an object of type T as its second argument (i.e. - bundle_url). - - Parameters: - bundle_location_fn: Function that generates bundle_urls for upload_bundle_fn. 
- """ - self.bundle_location_fn = bundle_location_fn - - def register_batch_csv_location_fn(self, batch_csv_location_fn: Callable[[], str]): - """ - For self-hosted mode only. Registers a function that gives a location for batch CSV - inputs. Should give different locations each time. This function is called as - batch_csv_location_fn(), and should return a batch_csv_url that upload_batch_csv_fn can - take. - - Strictly, batch_csv_location_fn() does not need to return a str. The only requirement is - that if batch_csv_location_fn returns a value of type T, then upload_batch_csv_fn() takes - in an object of type T as its second argument (i.e. batch_csv_url). - - Parameters: - batch_csv_location_fn: Function that generates batch_csv_urls for upload_batch_csv_fn. - """ - self.batch_csv_location_fn = batch_csv_location_fn - - def _upload_data(self, data: bytes) -> str: - if self.self_hosted: - if self.upload_bundle_fn is None: - raise ValueError("Upload_bundle_fn should be registered") - if self.bundle_location_fn is None: - raise ValueError("Need either bundle_location_fn to know where to upload bundles") - raw_bundle_url = self.bundle_location_fn() # type: ignore - self.upload_bundle_fn(data, raw_bundle_url) # type: ignore - else: - model_bundle_url = self.connection.post({}, MODEL_BUNDLE_SIGNED_URL_PATH) - s3_path = model_bundle_url["signedUrl"] - raw_bundle_url = f"s3://{model_bundle_url['bucket']}/{model_bundle_url['key']}" - requests.put(s3_path, data=data) - return raw_bundle_url - - def _get_bundle_url_from_base_paths(self, base_paths: List[str]) -> str: - tmpdir = tempfile.mkdtemp() - try: - zip_path = os.path.join(tmpdir, "bundle.zip") - _zip_directories(zip_path, base_paths) - with open(zip_path, "rb") as zip_f: - data = zip_f.read() - finally: - shutil.rmtree(tmpdir) - - raw_bundle_url = self._upload_data(data) - return raw_bundle_url - - def _upload_model_bundle( - self, - load_model_fn: Callable, - load_predict_fn: Callable, - ): - bundle = 
dict(load_model_fn=load_model_fn, load_predict_fn=load_predict_fn) - serialized_bundle = cloudpickle.dumps(bundle) - bundle_location = self._upload_data(data=serialized_bundle) - return bundle_location - - def _upload_schemas(self, request_schema: Type[BaseModel], response_schema: Type[BaseModel]) -> str: - model_definitions = get_model_definitions( - request_schema=request_schema, - response_schema=response_schema, - ) - model_definitions_encoded = json.dumps(model_definitions).encode() - return self._upload_data(model_definitions_encoded) - - def create_model_bundle_from_callable_v2( - self, - *, - model_bundle_name: str, - load_predict_fn: Callable[[LaunchModel_T], Callable[[Any], Any]], - load_model_fn: Callable[[], LaunchModel_T], - request_schema: Type[BaseModel], - response_schema: Type[BaseModel], - requirements: Optional[List[str]] = None, - pytorch_image_tag: Optional[str] = None, - tensorflow_version: Optional[str] = None, - custom_base_image_repository: Optional[str] = None, - custom_base_image_tag: Optional[str] = None, - app_config: Optional[Union[Dict[str, Any], str]] = None, - metadata: Optional[Dict[str, Any]] = None, - ) -> CreateModelBundleV2Response: - """ - Uploads and registers a model bundle to Scale Launch. - - Parameters: - model_bundle_name: Name of the model bundle. - - load_predict_fn: Function that takes in a model and returns a predict function. - When your model bundle is deployed, this predict function will be called as follows: - ``` - input = {"input": "some input"} # or whatever your request schema is. - - def load_model_fn(): - # load model - return model - - def load_predict_fn(model, app_config=None): - def predict_fn(input): - # do pre-processing - output = model(input) - # do post-processing - return output - return predict_fn - - predict_fn = load_predict_fn(load_model_fn(), app_config=optional_app_config) - response = predict_fn(input) - ``` - - load_model_fn: A function that, when run, loads a model. 
- - request_schema: A pydantic model that represents the request schema for the model - bundle. This is used to validate the request body for the model bundle's endpoint. - - response_schema: A pydantic model that represents the request schema for the model - bundle. This is used to validate the response for the model bundle's endpoint. - - requirements: List of pip requirements. - - pytorch_image_tag: The image tag for the PyTorch image that will be used to run the - bundle. Exactly one of ``pytorch_image_tag``, ``tensorflow_version``, or - ``custom_base_image_repository`` must be specified. - - tensorflow_version: The version of TensorFlow that will be used to run the bundle. - If not specified, the default version will be used. Exactly one of - ``pytorch_image_tag``, ``tensorflow_version``, or ``custom_base_image_repository`` - must be specified. - - custom_base_image_repository: The repository for a custom base image that will be - used to run the bundle. If not specified, the default base image will be used. - Exactly one of ``pytorch_image_tag``, ``tensorflow_version``, or - ``custom_base_image_repository`` must be specified. - - custom_base_image_tag: The tag for a custom base image that will be used to run the - bundle. Must be specified if ``custom_base_image_repository`` is specified. - - app_config: An optional dictionary of configuration values that will be passed to the - bundle when it is run. These values can be accessed by the bundle via the - ``app_config`` global variable. - - metadata: Metadata to record with the bundle. - - Returns: - An object containing the following keys: - - - ``model_bundle_id``: The ID of the created model bundle. 
- """ - nonnull_requirements = requirements or [] - bundle_location = self._upload_model_bundle(load_model_fn, load_predict_fn) - schema_location = self._upload_schemas(request_schema=request_schema, response_schema=response_schema) - framework = _get_model_bundle_framework( - pytorch_image_tag=pytorch_image_tag, - tensorflow_version=tensorflow_version, - custom_base_image_repository=custom_base_image_repository, - custom_base_image_tag=custom_base_image_tag, - ) - flavor = CloudpickleArtifactFlavor( - **dict_not_none( - flavor="cloudpickle_artifact", - load_predict_fn=inspect.getsource(load_predict_fn), - load_model_fn=inspect.getsource(load_model_fn), - framework=framework, - requirements=nonnull_requirements, - app_config=app_config, - location=bundle_location, - ) - ) - create_model_bundle_request = CreateModelBundleV2Request( - **dict_not_none( - name=model_bundle_name, - schema_location=schema_location, - flavor=flavor, - metadata=metadata, - ) - ) - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.create_model_bundle_v2_model_bundles_post( - body=create_model_bundle_request, - skip_deserialization=True, - ) - resp = CreateModelBundleV2Response.parse_raw(response.response.data) - - return resp - - def create_model_bundle_from_dirs_v2( - self, - *, - model_bundle_name: str, - base_paths: List[str], - load_predict_fn_module_path: str, - load_model_fn_module_path: str, - request_schema: Type[BaseModel], - response_schema: Type[BaseModel], - requirements_path: Optional[str] = None, - pytorch_image_tag: Optional[str] = None, - tensorflow_version: Optional[str] = None, - custom_base_image_repository: Optional[str] = None, - custom_base_image_tag: Optional[str] = None, - app_config: Optional[Dict[str, Any]] = None, - metadata: Optional[Dict[str, Any]] = None, - ) -> CreateModelBundleV2Response: - """ - Packages up code from one or more local filesystem folders and uploads them as a bundle - to Scale 
Launch. In this mode, a bundle is just local code instead of a serialized object. - - For example, if you have a directory structure like so, and your current working - directory is ``my_root``: - - ```text - my_root/ - my_module1/ - __init__.py - ...files and directories - my_inference_file.py - my_module2/ - __init__.py - ...files and directories - ``` - - then calling ``create_model_bundle_from_dirs_v2`` with ``base_paths=["my_module1", - "my_module2"]`` essentially creates a zip file without the root directory, e.g.: - - ```text - my_module1/ - __init__.py - ...files and directories - my_inference_file.py - my_module2/ - __init__.py - ...files and directories - ``` - - and these contents will be unzipped relative to the server side application root. Bear - these points in mind when referencing Python module paths for this bundle. For instance, - if ``my_inference_file.py`` has ``def f(...)`` as the desired inference loading function, - then the `load_predict_fn_module_path` argument should be `my_module1.my_inference_file.f`. - - Parameters: - model_bundle_name: The name of the model bundle you want to create. - - base_paths: A list of paths to directories that will be zipped up and uploaded - as a bundle. Each path must be relative to the current working directory. - - load_predict_fn_module_path: The Python module path to the function that will be - used to load the model for inference. This function should take in a path to a - model directory, and return a model object. The model object should be pickleable. - - load_model_fn_module_path: The Python module path to the function that will be - used to load the model for training. This function should take in a path to a - model directory, and return a model object. The model object should be pickleable. - - request_schema: A Pydantic model that defines the request schema for the bundle. - - response_schema: A Pydantic model that defines the response schema for the bundle. 
- - requirements_path: Path to a requirements.txt file that will be used to install - dependencies for the bundle. This file must be relative to the current working - directory. - - pytorch_image_tag: The image tag for the PyTorch image that will be used to run the - bundle. Exactly one of ``pytorch_image_tag``, ``tensorflow_version``, or - ``custom_base_image_repository`` must be specified. - - tensorflow_version: The version of TensorFlow that will be used to run the bundle. - If not specified, the default version will be used. Exactly one of - ``pytorch_image_tag``, ``tensorflow_version``, or ``custom_base_image_repository`` - must be specified. - - custom_base_image_repository: The repository for a custom base image that will be - used to run the bundle. If not specified, the default base image will be used. - Exactly one of ``pytorch_image_tag``, ``tensorflow_version``, or - ``custom_base_image_repository`` must be specified. - - custom_base_image_tag: The tag for a custom base image that will be used to run the - bundle. Must be specified if ``custom_base_image_repository`` is specified. - - app_config: An optional dictionary of configuration values that will be passed to the - bundle when it is run. These values can be accessed by the bundle via the - ``app_config`` global variable. - - metadata: Metadata to record with the bundle. - - Returns: - An object containing the following keys: - - - ``model_bundle_id``: The ID of the created model bundle. 
- """ - requirements = [] - if requirements_path is not None: - with open(requirements_path, "r", encoding="utf-8") as req_f: - requirements = req_f.read().splitlines() - bundle_location = self._get_bundle_url_from_base_paths(base_paths) - schema_location = self._upload_schemas(request_schema=request_schema, response_schema=response_schema) - framework = _get_model_bundle_framework( - pytorch_image_tag=pytorch_image_tag, - tensorflow_version=tensorflow_version, - custom_base_image_repository=custom_base_image_repository, - custom_base_image_tag=custom_base_image_tag, - ) - flavor = ZipArtifactFlavor( - **dict_not_none( - flavor="zip_artifact", - load_predict_fn_module_path=load_predict_fn_module_path, - load_model_fn_module_path=load_model_fn_module_path, - framework=framework, - requirements=requirements, - app_config=app_config, - location=bundle_location, - ) - ) - create_model_bundle_request = CreateModelBundleV2Request( - **dict_not_none( - name=model_bundle_name, - schema_location=schema_location, - flavor=flavor, - metadata=metadata, - ) - ) - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.create_model_bundle_v2_model_bundles_post( - body=create_model_bundle_request, - skip_deserialization=True, - ) - resp = CreateModelBundleV2Response.parse_raw(response.response.data) - - return resp - - def create_model_bundle_from_runnable_image_v2( - self, - *, - model_bundle_name: str, - request_schema: Type[BaseModel], - response_schema: Type[BaseModel], - repository: str, - tag: str, - command: List[str], - healthcheck_route: Optional[str] = None, - predict_route: Optional[str] = None, - env: Dict[str, str], - readiness_initial_delay_seconds: int, - metadata: Optional[Dict[str, Any]] = None, - ) -> CreateModelBundleV2Response: - """ - Create a model bundle from a runnable image. The specified ``command`` must start a process - that will listen for requests on port 5005 using HTTP. 
- - Inference requests must be served at the `POST /predict` route while the `GET /readyz` route is a healthcheck. - - Parameters: - model_bundle_name: The name of the model bundle you want to create. - - request_schema: A Pydantic model that defines the request schema for the bundle. - - response_schema: A Pydantic model that defines the response schema for the bundle. - - repository: The name of the Docker repository for the runnable image. - - tag: The tag for the runnable image. - - command: The command that will be used to start the process that listens for requests. - - predict_route: The endpoint route on the runnable image that will be called. - - healthcheck_route: The healthcheck endpoint route on the runnable image. - - env: A dictionary of environment variables that will be passed to the bundle when it - is run. - - readiness_initial_delay_seconds: The number of seconds to wait for the HTTP server to become ready and - successfully respond on its healthcheck. - - metadata: Metadata to record with the bundle. - - Returns: - An object containing the following keys: - - - ``model_bundle_id``: The ID of the created model bundle. 
- """ - schema_location = self._upload_schemas(request_schema=request_schema, response_schema=response_schema) - flavor = RunnableImageFlavor( - **dict_not_none( - flavor="runnable_image", - repository=repository, - tag=tag, - command=command, - healthcheck_route=healthcheck_route, - predict_route=predict_route, - env=env, - protocol="http", - readiness_initial_delay_seconds=readiness_initial_delay_seconds, - ) - ) - create_model_bundle_request = CreateModelBundleV2Request( - **dict_not_none( - name=model_bundle_name, - schema_location=schema_location, - flavor=flavor, - metadata=metadata, - ) - ) - - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.create_model_bundle_v2_model_bundles_post( - body=create_model_bundle_request, - skip_deserialization=True, - ) - resp = CreateModelBundleV2Response.parse_raw(response.response.data) - - return resp - - def create_model_bundle_from_streaming_enhanced_runnable_image_v2( - self, - *, - model_bundle_name: str, - request_schema: Type[BaseModel], - response_schema: Type[BaseModel], - repository: str, - tag: str, - command: Optional[List[str]] = None, - healthcheck_route: Optional[str] = None, - predict_route: Optional[str] = None, - streaming_command: List[str], - streaming_predict_route: Optional[str] = None, - env: Dict[str, str], - readiness_initial_delay_seconds: int, - metadata: Optional[Dict[str, Any]] = None, - ) -> CreateModelBundleV2Response: - """ - Create a model bundle from a runnable image. The specified ``command`` must start a process - that will listen for requests on port 5005 using HTTP. - - Inference requests must be served at the `POST /predict` route while the `GET /readyz` route is a healthcheck. - - Parameters: - model_bundle_name: The name of the model bundle you want to create. - - request_schema: A Pydantic model that defines the request schema for the bundle. 
- - response_schema: A Pydantic model that defines the response schema for the bundle. - - repository: The name of the Docker repository for the runnable image. - - tag: The tag for the runnable image. - - command: The command that will be used to start the process that listens for requests if - this bundle is used as a SYNC or ASYNC endpoint. - - healthcheck_route: The healthcheck endpoint route on the runnable image. - - predict_route: The endpoint route on the runnable image that will be called if this bundle is used as a SYNC - or ASYNC endpoint. - - streaming_command: The command that will be used to start the process that listens for - requests if this bundle is used as a STREAMING endpoint. - - streaming_predict_route: The endpoint route on the runnable image that will be called if this bundle is used - as a STREAMING endpoint. - - env: A dictionary of environment variables that will be passed to the bundle when it - is run. - - readiness_initial_delay_seconds: The number of seconds to wait for the HTTP server to become ready and - successfully respond on its healthcheck. - - metadata: Metadata to record with the bundle. - - Returns: - An object containing the following keys: - - - ``model_bundle_id``: The ID of the created model bundle. 
- """ - schema_location = self._upload_schemas(request_schema=request_schema, response_schema=response_schema) - flavor = StreamingEnhancedRunnableImageFlavor( - **dict_not_none( - flavor="streaming_enhanced_runnable_image", - repository=repository, - tag=tag, - command=command, - healthcheck_route=healthcheck_route, - predict_route=predict_route, - streaming_command=streaming_command, - streaming_predict_route=streaming_predict_route, - env=env, - protocol="http", - readiness_initial_delay_seconds=readiness_initial_delay_seconds, - ) - ) - create_model_bundle_request = CreateModelBundleV2Request( - **dict_not_none( - name=model_bundle_name, - schema_location=schema_location, - flavor=flavor, - metadata=metadata, - ) - ) - - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.create_model_bundle_v2_model_bundles_post( - body=create_model_bundle_request, - skip_deserialization=True, - ) - resp = CreateModelBundleV2Response.parse_raw(response.response.data) - - return resp - - def create_model_bundle_from_triton_enhanced_runnable_image_v2( - self, - *, - model_bundle_name: str, - request_schema: Type[BaseModel], - response_schema: Type[BaseModel], - repository: str, - tag: str, - command: List[str], - healthcheck_route: Optional[str] = None, - predict_route: Optional[str] = None, - env: Dict[str, str], - readiness_initial_delay_seconds: int, - triton_model_repository: str, - triton_model_replicas: Optional[Dict[str, str]] = None, - triton_num_cpu: float, - triton_commit_tag: str, - triton_storage: Optional[str] = None, - triton_memory: Optional[str] = None, - triton_readiness_initial_delay_seconds: int, - metadata: Optional[Dict[str, Any]] = None, - ) -> CreateModelBundleV2Response: - """ - Create a model bundle from a runnable image and a tritonserver image. 
- - Same requirements as :param:`create_model_bundle_from_runnable_image_v2` with additional constraints necessary - for configuring tritonserver's execution. - - Parameters: - model_bundle_name: The name of the model bundle you want to create. - - request_schema: A Pydantic model that defines the request schema for the bundle. - - response_schema: A Pydantic model that defines the response schema for the bundle. - - repository: The name of the Docker repository for the runnable image. - - tag: The tag for the runnable image. - - command: The command that will be used to start the process that listens for requests. - - predict_route: The endpoint route on the runnable image that will be called. - - healthcheck_route: The healthcheck endpoint route on the runnable image. - - env: A dictionary of environment variables that will be passed to the bundle when it - is run. - - readiness_initial_delay_seconds: The number of seconds to wait for the HTTP server to - become ready and successfully respond on its healthcheck. - - triton_model_repository: The S3 prefix that contains the contents of the model - repository, formatted according to - https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_repository.md - - triton_model_replicas: If supplied, the name and number of replicas to make for each - model. - - triton_num_cpu: Number of CPUs, fractional, to allocate to tritonserver. - - triton_commit_tag: The image tag of the specific trionserver version. - - triton_storage: Amount of storage space to allocate for the tritonserver container. - - triton_memory: Amount of memory to allocate for the tritonserver container. - - triton_readiness_initial_delay_seconds: Like readiness_initial_delay_seconds, but for - tritonserver's own healthcheck. - - metadata: Metadata to record with the bundle. - - Returns: - An object containing the following keys: - - - ``model_bundle_id``: The ID of the created model bundle. 
- """ - schema_location = self._upload_schemas(request_schema=request_schema, response_schema=response_schema) - flavor = TritonEnhancedRunnableImageFlavor( - **dict_not_none( - flavor="triton_enhanced_runnable_image", - repository=repository, - tag=tag, - command=command, - healthcheck_route=healthcheck_route, - predict_route=predict_route, - env=env, - protocol="http", - readiness_initial_delay_seconds=readiness_initial_delay_seconds, - triton_model_repository=triton_model_repository, - triton_model_replicas=triton_model_replicas, - triton_num_cpu=triton_num_cpu, - triton_commit_tag=triton_commit_tag, - triton_storage=triton_storage, - triton_memory=triton_memory, - triton_readiness_initial_delay_seconds=triton_readiness_initial_delay_seconds, - ) - ) - create_model_bundle_request = CreateModelBundleV2Request( - **dict_not_none( - name=model_bundle_name, - schema_location=schema_location, - flavor=flavor, - metadata=metadata, - ) - ) - - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.create_model_bundle_v2_model_bundles_post( - body=create_model_bundle_request, - skip_deserialization=True, - ) - resp = CreateModelBundleV2Response.parse_raw(response.response.data) - - return resp - - def get_model_bundle_v2(self, model_bundle_id: str) -> ModelBundleV2Response: - """ - Get a model bundle. - - Parameters: - model_bundle_id: The ID of the model bundle you want to get. - - Returns: - An object containing the following fields: - - - ``id``: The ID of the model bundle. - - ``name``: The name of the model bundle. - - ``flavor``: The flavor of the model bundle. Either `RunnableImage`, - `CloudpickleArtifact`, `ZipArtifact`, or `TritonEnhancedRunnableImageFlavor`. - - ``created_at``: The time the model bundle was created. - - ``metadata``: A dictionary of metadata associated with the model bundle. - - ``model_artifact_ids``: A list of IDs of model artifacts associated with the - bundle. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"model_bundle_id": model_bundle_id}) - response = api_instance.get_model_bundle_v2_model_bundles_model_bundle_id_get( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = ModelBundleV2Response.parse_raw(response.response.data) - - return resp - - def get_latest_model_bundle_v2(self, model_bundle_name: str) -> ModelBundleV2Response: - """ - Get the latest version of a model bundle. - - Parameters: - model_bundle_name: The name of the model bundle you want to get. - - Returns: - An object containing the following keys: - - - ``id``: The ID of the model bundle. - - ``name``: The name of the model bundle. - - ``schema_location``: The location of the schema for the model bundle. - - ``flavor``: The flavor of the model bundle. Either `RunnableImage`, - `CloudpickleArtifact`, `ZipArtifact`, or `TritonEnhancedRunnableImageFlavor`. - - ``created_at``: The time the model bundle was created. - - ``metadata``: A dictionary of metadata associated with the model bundle. - - ``model_artifact_ids``: A list of IDs of model artifacts associated with the - bundle. - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - query_params = frozendict({"model_name": model_bundle_name}) - response = api_instance.get_latest_model_bundle_v2_model_bundles_latest_get( # type: ignore - query_params=query_params, - skip_deserialization=True, - ) - resp = ModelBundleV2Response.parse_raw(response.response.data) - - return resp - - def list_model_bundles_v2(self) -> ListModelBundlesV2Response: - """ - List all model bundles. - - Returns: - An object containing the following keys: - - - ``model_bundles``: A list of model bundles. Each model bundle is an object. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.list_model_bundles_v2_model_bundles_get(skip_deserialization=True) - resp = ListModelBundlesV2Response.parse_raw(response.response.data) - - return resp - - def clone_model_bundle_with_changes_v2( - self, - original_model_bundle_id: str, - new_app_config: Optional[Dict[str, Any]] = None, - ) -> CreateModelBundleV2Response: - """ - Clone a model bundle with an optional new ``app_config``. - - Parameters: - original_model_bundle_id: The ID of the model bundle you want to clone. - - new_app_config: A dictionary of new app config values to use for the cloned model. - - Returns: - An object containing the following keys: - - - ``model_bundle_id``: The ID of the cloned model bundle. - """ - clone_model_bundle_request = CloneModelBundleV2Request( - **dict_not_none( - original_model_bundle_id=original_model_bundle_id, - new_app_config=new_app_config, - ) - ) - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - body=clone_model_bundle_request, - skip_deserialization=True, - ) - resp = CreateModelBundleV2Response.parse_raw(response.response.data) - - return resp - - @deprecated(deprecated_in="1.0.0", details="Use create_model_bundle_from_dirs_v2.") - def create_model_bundle_from_dirs( - self, - *, - model_bundle_name: str, - base_paths: List[str], - requirements_path: str, - env_params: Dict[str, str], - load_predict_fn_module_path: str, - load_model_fn_module_path: str, - app_config: Optional[Union[Dict[str, Any], str]] = None, - request_schema: Optional[Type[BaseModel]] = None, - response_schema: Optional[Type[BaseModel]] = None, - ) -> ModelBundle: - """ - Warning: - This method is deprecated. Use - [``create_model_bundle_from_dirs_v2``](./#launch.client.LaunchClient.create_model_bundle_from_dirs_v2) - instead. 
- - Parameters: - model_bundle_name: The name of the model bundle you want to create. The name - must be unique across all bundles that you own. - - base_paths: The paths on the local filesystem where the bundle code lives. - - requirements_path: A path on the local filesystem where a ``requirements.txt`` file - lives. - - env_params: A dictionary that dictates environment information e.g. - the use of pytorch or tensorflow, which base image tag to use, etc. - Specifically, the dictionary should contain the following keys: - - - ``framework_type``: either ``tensorflow`` or ``pytorch``. - - PyTorch fields: - - ``pytorch_image_tag``: An image tag for the ``pytorch`` docker base image. The - list of tags can be found from https://hub.docker.com/r/pytorch/pytorch/tags - - Example: - ```py - { - "framework_type": "pytorch", - "pytorch_image_tag": "1.10.0-cuda11.3-cudnn8-runtime", - } - ``` - - load_predict_fn_module_path: A python module path for a function that, when called - with the output of load_model_fn_module_path, returns a function that carries out - inference. - - load_model_fn_module_path: A python module path for a function that returns a model. - The output feeds into the function located at load_predict_fn_module_path. - - app_config: Either a Dictionary that represents a YAML file contents or a local path - to a YAML file. - - request_schema: A pydantic model that represents the request schema for the model - bundle. This is used to validate the request body for the model bundle's endpoint. - - response_schema: A pydantic model that represents the request schema for the model - bundle. This is used to validate the response for the model bundle's endpoint. - Note: If request_schema is specified, then response_schema must also be specified. 
- """ - with open(requirements_path, "r", encoding="utf-8") as req_f: - requirements = req_f.read().splitlines() - - raw_bundle_url = self._get_bundle_url_from_base_paths(base_paths) - - schema_location = None - if bool(request_schema) ^ bool(response_schema): - raise ValueError("If request_schema is specified, then response_schema must also be specified.") - if request_schema is not None and response_schema is not None: - schema_location = self._upload_schemas(request_schema=request_schema, response_schema=response_schema) - - bundle_metadata = { - "load_predict_fn_module_path": load_predict_fn_module_path, - "load_model_fn_module_path": load_model_fn_module_path, - } - - logger.info( - "create_model_bundle_from_dirs: raw_bundle_url=%s", - raw_bundle_url, - ) - payload = dict( - packaging_type="zip", - bundle_name=model_bundle_name, - location=raw_bundle_url, - bundle_metadata=bundle_metadata, - requirements=requirements, - env_params=env_params, - schema_location=schema_location, - ) - _add_app_config_to_bundle_create_payload(payload, app_config) - - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - framework = ModelBundleFrameworkType(env_params["framework_type"]) - env_params_copy = env_params.copy() - env_params_copy["framework_type"] = framework # type: ignore - env_params_obj = ModelBundleEnvironmentParams(**env_params_copy) # type: ignore - payload = dict_not_none( - env_params=env_params_obj, - location=raw_bundle_url, - name=model_bundle_name, - requirements=requirements, - packaging_type=ModelBundlePackagingType("zip"), - metadata=bundle_metadata, - app_config=payload.get("app_config"), - schema_location=schema_location, - ) - create_model_bundle_request = CreateModelBundleV1Request(**payload) # type: ignore - api_instance.create_model_bundle_v1_model_bundles_post( - body=create_model_bundle_request, - skip_deserialization=True, - ) - return ModelBundle(model_bundle_name) - - @deprecated(deprecated_in="1.0.0", 
details="Use create_model_bundle_from_callable_v2.") - def create_model_bundle( # pylint: disable=too-many-statements - self, - model_bundle_name: str, - env_params: Dict[str, str], - *, - load_predict_fn: Optional[Callable[[LaunchModel_T], Callable[[Any], Any]]] = None, - predict_fn_or_cls: Optional[Callable[[Any], Any]] = None, - requirements: Optional[List[str]] = None, - model: Optional[LaunchModel_T] = None, - load_model_fn: Optional[Callable[[], LaunchModel_T]] = None, - app_config: Optional[Union[Dict[str, Any], str]] = None, - globals_copy: Optional[Dict[str, Any]] = None, - request_schema: Optional[Type[BaseModel]] = None, - response_schema: Optional[Type[BaseModel]] = None, - ) -> ModelBundle: - """ - Warning: - This method is deprecated. Use - [`create_model_bundle_from_callable_v2`](./#create_model_bundle_from_callable_v2) instead. - - Parameters: - model_bundle_name: The name of the model bundle you want to create. The name - must be unique across all bundles that you own. - - predict_fn_or_cls: `Function` or a ``Callable`` class that runs end-to-end - (pre/post processing and model inference) on the call. i.e. - ``predict_fn_or_cls(REQUEST) -> RESPONSE``. - - model: Typically a trained Neural Network, e.g. a Pytorch module. - - Exactly one of ``model`` and ``load_model_fn`` must be provided. - - load_model_fn: A function that, when run, loads a model. This function is essentially - a deferred wrapper around the ``model`` argument. - - Exactly one of ``model`` and ``load_model_fn`` must be provided. - - load_predict_fn: Function that, when called with a model, returns a function that - carries out inference. 
- - If ``model`` is specified, then this is equivalent - to: - ``load_predict_fn(model, app_config=optional_app_config]) -> predict_fn`` - - Otherwise, if ``load_model_fn`` is specified, then this is equivalent to: - ``load_predict_fn(load_model_fn(), app_config=optional_app_config]) -> predict_fn`` - - In both cases, ``predict_fn`` is then the inference function, i.e.: - ``predict_fn(REQUEST) -> RESPONSE`` - - - requirements: A list of python package requirements, where each list element is of - the form ``==``, e.g. - - ``["tensorflow==2.3.0", "tensorflow-hub==0.11.0"]`` - - If you do not pass in a value for ``requirements``, then you must pass in - ``globals()`` for the ``globals_copy`` argument. - - app_config: Either a Dictionary that represents a YAML file contents or a local path - to a YAML file. - - env_params: A dictionary that dictates environment information e.g. - the use of pytorch or tensorflow, which base image tag to use, etc. - Specifically, the dictionary should contain the following keys: - - - ``framework_type``: either ``tensorflow`` or ``pytorch``. - PyTorch fields: - - ``pytorch_image_tag``: An image tag for the ``pytorch`` docker base image. The - list of tags can be found from https://hub.docker.com/r/pytorch/pytorch/tags. - - Example: - - .. code-block:: python - - { - "framework_type": "pytorch", - "pytorch_image_tag": "1.10.0-cuda11.3-cudnn8-runtime" - } - - - Tensorflow fields: - - ``tensorflow_version``: Version of tensorflow, e.g. ``"2.3.0"``. - - globals_copy: Dictionary of the global symbol table. Normally provided by - ``globals()`` built-in function. - - request_schema: A pydantic model that represents the request schema for the model - bundle. This is used to validate the request body for the model bundle's endpoint. - - response_schema: A pydantic model that represents the request schema for the model - bundle. This is used to validate the response for the model bundle's endpoint. 
- Note: If request_schema is specified, then response_schema must also be specified. - """ - # TODO(ivan): remove `disable=too-many-branches` when get rid of `load_*` functions - # pylint: disable=too-many-branches - - check_args = [ - predict_fn_or_cls is not None, - load_predict_fn is not None and model is not None, - load_predict_fn is not None and load_model_fn is not None, - ] - - if sum(check_args) != 1: - raise ValueError( - "A model bundle consists of exactly {predict_fn_or_cls}, {load_predict_fn + " - "model}, or {load_predict_fn + load_model_fn}. " - ) - # TODO should we try to catch when people intentionally pass both model and load_model_fn - # as None? - - if requirements is None: - # TODO explore: does globals() actually work as expected? Should we use globals_copy - # instead? - requirements_inferred = find_packages_from_imports(globals()) - requirements = [f"{key}=={value}" for key, value in requirements_inferred.items()] - logger.info( - "Using \n%s\n for model bundle %s", - requirements, - model_bundle_name, - ) - - # Prepare cloudpickle for external imports - if globals_copy: - for module in get_imports(globals_copy): - if module.__name__ == cloudpickle.__name__: - # Avoid recursion - # register_pickle_by_value does not work properly with itself - continue - cloudpickle.register_pickle_by_value(module) - - bundle: Union[Callable[[Any], Any], Dict[str, Any], None] # validate bundle - bundle_metadata = {} - # Create bundle - if predict_fn_or_cls: - bundle = predict_fn_or_cls - if inspect.isfunction(predict_fn_or_cls): - source_code = inspect.getsource(predict_fn_or_cls) - else: - source_code = inspect.getsource(predict_fn_or_cls.__class__) - bundle_metadata["predict_fn_or_cls"] = source_code - elif model is not None: - bundle = dict(model=model, load_predict_fn=load_predict_fn) - bundle_metadata["load_predict_fn"] = inspect.getsource(load_predict_fn) # type: ignore - else: - bundle = dict(load_model_fn=load_model_fn, 
load_predict_fn=load_predict_fn) - bundle_metadata["load_predict_fn"] = inspect.getsource(load_predict_fn) # type: ignore - bundle_metadata["load_model_fn"] = inspect.getsource(load_model_fn) # type: ignore - - serialized_bundle = cloudpickle.dumps(bundle) - raw_bundle_url = self._upload_data(data=serialized_bundle) - - schema_location = None - if bool(request_schema) ^ bool(response_schema): - raise ValueError("If request_schema is specified, then response_schema must also be specified.") - if request_schema is not None and response_schema is not None: - model_definitions = get_model_definitions( - request_schema=request_schema, - response_schema=response_schema, - ) - model_definitions_encoded = json.dumps(model_definitions).encode() - schema_location = self._upload_data(model_definitions_encoded) - - payload = dict( - packaging_type="cloudpickle", - bundle_name=model_bundle_name, - location=raw_bundle_url, - bundle_metadata=bundle_metadata, - requirements=requirements, - env_params=env_params, - schema_location=schema_location, - ) - - _add_app_config_to_bundle_create_payload(payload, app_config) - framework = ModelBundleFrameworkType(env_params["framework_type"]) - env_params_copy = env_params.copy() - env_params_copy["framework_type"] = framework # type: ignore - env_params_obj = ModelBundleEnvironmentParams(**env_params_copy) # type: ignore - - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - payload = dict_not_none( - env_params=env_params_obj, - location=raw_bundle_url, - name=model_bundle_name, - requirements=requirements, - packaging_type=ModelBundlePackagingType("cloudpickle"), - metadata=bundle_metadata, - app_config=app_config, - schema_location=schema_location, - ) - create_model_bundle_request = CreateModelBundleV1Request(**payload) # type: ignore - api_instance.create_model_bundle_v1_model_bundles_post( - body=create_model_bundle_request, - skip_deserialization=True, - ) - # resp["data"]["name"] should 
equal model_bundle_name - # TODO check that a model bundle was created and no name collisions happened - return ModelBundle(model_bundle_name) - - # pylint: disable=too-many-branches - def create_model_endpoint( - self, - *, - endpoint_name: str, - model_bundle: Union[ModelBundle, str], - cpus: int = 3, - memory: str = "8Gi", - storage: str = "16Gi", - gpus: int = 0, - min_workers: int = 1, - max_workers: int = 1, - per_worker: int = 10, - gpu_type: Optional[str] = None, - endpoint_type: str = "sync", - high_priority: Optional[bool] = False, - post_inference_hooks: Optional[List[PostInferenceHooks]] = None, - default_callback_url: Optional[str] = None, - default_callback_auth_kind: Optional[Literal["basic", "mtls"]] = None, - default_callback_auth_username: Optional[str] = None, - default_callback_auth_password: Optional[str] = None, - default_callback_auth_cert: Optional[str] = None, - default_callback_auth_key: Optional[str] = None, - public_inference: Optional[bool] = None, - update_if_exists: bool = False, - labels: Optional[Dict[str, str]] = None, - ) -> Optional[Endpoint]: - """ - Creates and registers a model endpoint in Scale Launch. The returned object is an - instance of type ``Endpoint``, which is a base class of either ``SyncEndpoint`` or - ``AsyncEndpoint``. This is the object to which you sent inference requests. - - Parameters: - endpoint_name: The name of the model endpoint you want to create. The name - must be unique across all endpoints that you own. - - model_bundle: The ``ModelBundle`` that the endpoint should serve. - - cpus: Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater - than or equal to 1. - - memory: Amount of memory each worker should get, e.g. "4Gi", "512Mi", etc. This must - be a positive amount of memory. - - storage: Amount of local ephemeral storage each worker should get, e.g. "4Gi", - "512Mi", etc. This must be a positive amount of storage. - - gpus: Number of gpus each worker should get, e.g. 
0, 1, etc. - - min_workers: The minimum number of workers. Must be greater than or equal to 0. This - should be determined by computing the minimum throughput of your workload and - dividing it by the throughput of a single worker. This field must be at least ``1`` - for synchronous endpoints. - - max_workers: The maximum number of workers. Must be greater than or equal to 0, - and as well as greater than or equal to ``min_workers``. This should be determined by - computing the maximum throughput of your workload and dividing it by the throughput - of a single worker. - - per_worker: The maximum number of concurrent requests that an individual worker can - service. Launch automatically scales the number of workers for the endpoint so that - each worker is processing ``per_worker`` requests, subject to the limits defined by - ``min_workers`` and ``max_workers``. - - - If the average number of concurrent requests per worker is lower than - ``per_worker``, then the number of workers will be reduced. - Otherwise, - if the average number of concurrent requests per worker is higher than - ``per_worker``, then the number of workers will be increased to meet the elevated - traffic. - - Here is our recommendation for computing ``per_worker``: - - 1. Compute ``min_workers`` and ``max_workers`` per your minimum and maximum - throughput requirements. 2. Determine a value for the maximum number of - concurrent requests in the workload. Divide this number by ``max_workers``. Doing - this ensures that the number of workers will "climb" to ``max_workers``. - - gpu_type: If specifying a non-zero number of gpus, this controls the type of gpu - requested. Here are the supported values: - - - ``nvidia-tesla-t4`` - - ``nvidia-ampere-a10`` - - ``nvidia-hopper-h100`` - - ``nvidia-hopper-h100-1g20g`` - - ``nvidia-hopper-h100-3g40g`` - - endpoint_type: Either ``"sync"``, ``"async"``, or ``"streaming"``. - - high_priority: Either ``True`` or ``False``. 
Enabling this will allow the created - endpoint to leverage the shared pool of prewarmed nodes for faster spinup time. - - post_inference_hooks: List of hooks to trigger after inference tasks are served. - - default_callback_url: The default callback url to use for async endpoints. - This can be overridden in the task parameters for each individual task. - post_inference_hooks must contain "callback" for the callback to be triggered. - - default_callback_auth_kind: The default callback auth kind to use for async endpoints. - Either "basic" or "mtls". This can be overridden in the task parameters for each - individual task. - - default_callback_auth_username: The default callback auth username to use. This only - applies if default_callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - default_callback_auth_password: The default callback auth password to use. This only - applies if default_callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - default_callback_auth_cert: The default callback auth cert to use. This only applies - if default_callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. - - default_callback_auth_key: The default callback auth key to use. This only applies - if default_callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. - - public_inference: If ``True``, this endpoint will be available to all user IDs for - inference. - - update_if_exists: If ``True``, will attempt to update the endpoint if it exists. - Otherwise, will unconditionally try to create a new endpoint. Note that endpoint - names for a given user must be unique, so attempting to call this function with - ``update_if_exists=False`` for an existing endpoint will raise an error. - - labels: An optional dictionary of key/value pairs to associate with this endpoint. 
- - Returns: - A Endpoint object that can be used to make requests to the endpoint. - - """ - existing_endpoint = self.get_model_endpoint(endpoint_name) - if update_if_exists and existing_endpoint: - self.edit_model_endpoint( - model_endpoint=endpoint_name, - model_bundle=model_bundle, - cpus=cpus, - memory=memory, - storage=storage, - gpus=gpus, - min_workers=min_workers, - max_workers=max_workers, - per_worker=per_worker, - gpu_type=gpu_type, - high_priority=high_priority, - default_callback_url=default_callback_url, - default_callback_auth_kind=default_callback_auth_kind, - default_callback_auth_username=default_callback_auth_username, - default_callback_auth_password=default_callback_auth_password, - default_callback_auth_cert=default_callback_auth_cert, - default_callback_auth_key=default_callback_auth_key, - public_inference=public_inference, - ) - return existing_endpoint - else: - # Presumably, the user knows that the endpoint doesn't already exist, and so we can - # defer to the server to reject any duplicate creations. 
- logger.info("Creating new endpoint") - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - if not isinstance(model_bundle, ModelBundle) or model_bundle.id is None: - model_bundle = self.get_model_bundle(model_bundle) - post_inference_hooks_strs = None - if post_inference_hooks is not None: - post_inference_hooks_strs = [] - for hook in post_inference_hooks: - if isinstance(hook, PostInferenceHooks): - post_inference_hooks_strs.append(hook.value) - else: - post_inference_hooks_strs.append(hook) - - if default_callback_auth_kind is not None: - default_callback_auth = CallbackAuth( - **dict_not_none( - kind=default_callback_auth_kind, - username=default_callback_auth_username, - password=default_callback_auth_password, - cert=default_callback_auth_cert, - key=default_callback_auth_key, - ) - ) - else: - default_callback_auth = None - - payload = dict_not_none( - cpus=cpus, - endpoint_type=ModelEndpointType(endpoint_type), - gpus=gpus, - gpu_type=GpuType(gpu_type) if gpu_type is not None else None, - labels=labels or {}, - max_workers=max_workers, - memory=memory, - metadata={}, - min_workers=min_workers, - model_bundle_id=model_bundle.id, - name=endpoint_name, - per_worker=per_worker, - high_priority=high_priority, - post_inference_hooks=post_inference_hooks_strs, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - storage=storage, - public_inference=public_inference, - ) - create_model_endpoint_request = CreateModelEndpointV1Request(**payload) - response = api_instance.create_model_endpoint_v1_model_endpoints_post( - body=create_model_endpoint_request, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - endpoint_creation_task_id = resp.get("endpoint_creation_task_id", None) # TODO probably throw on None - logger.info("Endpoint creation task id is %s", endpoint_creation_task_id) - model_endpoint = ModelEndpoint(name=endpoint_name, 
bundle_name=model_bundle.name) - if endpoint_type == "async": - return AsyncEndpoint(model_endpoint=model_endpoint, client=self) - elif endpoint_type == "sync": - return SyncEndpoint(model_endpoint=model_endpoint, client=self) - elif endpoint_type == "streaming": - return StreamingEndpoint(model_endpoint=model_endpoint, client=self) - else: - raise ValueError("Endpoint should be one of the types 'sync', 'async', or 'streaming'") - - def edit_model_endpoint( - self, - *, - model_endpoint: Union[ModelEndpoint, str], - model_bundle: Optional[Union[ModelBundle, str]] = None, - cpus: Optional[float] = None, - memory: Optional[str] = None, - storage: Optional[str] = None, - gpus: Optional[int] = None, - min_workers: Optional[int] = None, - max_workers: Optional[int] = None, - per_worker: Optional[int] = None, - gpu_type: Optional[str] = None, - high_priority: Optional[bool] = None, - post_inference_hooks: Optional[List[PostInferenceHooks]] = None, - default_callback_url: Optional[str] = None, - default_callback_auth_kind: Optional[Literal["basic", "mtls"]] = None, - default_callback_auth_username: Optional[str] = None, - default_callback_auth_password: Optional[str] = None, - default_callback_auth_cert: Optional[str] = None, - default_callback_auth_key: Optional[str] = None, - public_inference: Optional[bool] = None, - ) -> None: - """ - Edits an existing model endpoint. Here are the fields that **cannot** be edited on an - existing endpoint: - - - The endpoint's name. - The endpoint's type (i.e. you cannot go from a ``SyncEnpdoint`` - to an ``AsyncEndpoint`` or vice versa. - - Parameters: - model_endpoint: The model endpoint (or its name) you want to edit. The name - must be unique across all endpoints that you own. - - model_bundle: The ``ModelBundle`` that the endpoint should serve. - - cpus: Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater - than or equal to 1. - - memory: Amount of memory each worker should get, e.g. "4Gi", "512Mi", etc. 
This must - be a positive amount of memory. - - storage: Amount of local ephemeral storage each worker should get, e.g. "4Gi", - "512Mi", etc. This must be a positive amount of storage. - - gpus: Number of gpus each worker should get, e.g. 0, 1, etc. - - min_workers: The minimum number of workers. Must be greater than or equal to 0. - - max_workers: The maximum number of workers. Must be greater than or equal to 0, - and as well as greater than or equal to ``min_workers``. - - per_worker: The maximum number of concurrent requests that an individual worker can - service. Launch automatically scales the number of workers for the endpoint so that - each worker is processing ``per_worker`` requests: - - - If the average number of concurrent requests per worker is lower than - ``per_worker``, then the number of workers will be reduced. - Otherwise, - if the average number of concurrent requests per worker is higher than - ``per_worker``, then the number of workers will be increased to meet the elevated - traffic. - - gpu_type: If specifying a non-zero number of gpus, this controls the type of gpu - requested. Here are the supported values: - - - ``nvidia-tesla-t4`` - - ``nvidia-ampere-a10`` - - ``nvidia-hopper-h100`` - - ``nvidia-hopper-h100-1g20g`` - - ``nvidia-hopper-h100-3g40g`` - - high_priority: Either ``True`` or ``False``. Enabling this will allow the created - endpoint to leverage the shared pool of prewarmed nodes for faster spinup time. - - post_inference_hooks: List of hooks to trigger after inference tasks are served. - - default_callback_url: The default callback url to use for async endpoints. - This can be overridden in the task parameters for each individual task. - post_inference_hooks must contain "callback" for the callback to be triggered. - - default_callback_auth_kind: The default callback auth kind to use for async endpoints. - Either "basic" or "mtls". This can be overridden in the task parameters for each - individual task. 
- - default_callback_auth_username: The default callback auth username to use. This only - applies if default_callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - default_callback_auth_password: The default callback auth password to use. This only - applies if default_callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - default_callback_auth_cert: The default callback auth cert to use. This only applies - if default_callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. - - default_callback_auth_key: The default callback auth key to use. This only applies - if default_callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. - - public_inference: If ``True``, this endpoint will be available to all user IDs for - inference. - """ - logger.info("Editing existing endpoint") - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - - if model_bundle is None: - model_bundle_id = None - elif isinstance(model_bundle, ModelBundle) and model_bundle.id is not None: - model_bundle_id = model_bundle.id - else: - model_bundle = self.get_model_bundle(model_bundle) - model_bundle_id = model_bundle.id - - if model_endpoint is None: - model_endpoint_id = None - elif isinstance(model_endpoint, ModelEndpoint) and model_endpoint.id is not None: - model_endpoint_id = model_endpoint.id - else: - endpoint_name = _model_endpoint_to_name(model_endpoint) - model_endpoint_full = self.get_model_endpoint(endpoint_name) - model_endpoint_id = model_endpoint_full.model_endpoint.id # type: ignore - - post_inference_hooks_strs = None - if post_inference_hooks is not None: - post_inference_hooks_strs = [] - for hook in post_inference_hooks: - if isinstance(hook, PostInferenceHooks): - post_inference_hooks_strs.append(hook.value) - else: - 
post_inference_hooks_strs.append(hook) - - if default_callback_auth_kind is not None: - default_callback_auth = CallbackAuth( - **dict_not_none( - kind=default_callback_auth_kind, - username=default_callback_auth_username, - password=default_callback_auth_password, - cert=default_callback_auth_cert, - key=default_callback_auth_key, - ) - ) - else: - default_callback_auth = None - - payload = dict_not_none( - cpus=cpus, - gpus=gpus, - gpu_type=GpuType(gpu_type) if gpu_type is not None else None, - max_workers=max_workers, - memory=memory, - min_workers=min_workers, - model_bundle_id=model_bundle_id, - per_worker=per_worker, - high_priority=high_priority, - post_inference_hooks=post_inference_hooks_strs, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - storage=storage, - public_inference=public_inference, - ) - update_model_endpoint_request = UpdateModelEndpointV1Request(**payload) - path_params = frozendict({"model_endpoint_id": model_endpoint_id}) - response = api_instance.update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( # type: ignore - body=update_model_endpoint_request, - path_params=path_params, # type: ignore - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - endpoint_creation_task_id = resp.get("endpoint_creation_task_id", None) # Returned from server as "creation" - logger.info("Endpoint edit task id is %s", endpoint_creation_task_id) - - def get_model_endpoint(self, endpoint_name: str) -> Optional[Union[AsyncEndpoint, SyncEndpoint]]: - """ - Gets a model endpoint associated with a name. - - Parameters: - endpoint_name: The name of the endpoint to retrieve. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - query_params = frozendict({"name": endpoint_name}) - response = api_instance.list_model_endpoints_v1_model_endpoints_get( # type: ignore - query_params=query_params, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - if len(resp["model_endpoints"]) == 0: - return None - resp = resp["model_endpoints"][0] - - if resp["endpoint_type"] == "async": - return AsyncEndpoint(ModelEndpoint.from_dict(resp), client=self) # type: ignore - elif resp["endpoint_type"] == "sync": - return SyncEndpoint(ModelEndpoint.from_dict(resp), client=self) # type: ignore - elif resp["endpoint_type"] == "streaming": - return StreamingEndpoint(ModelEndpoint.from_dict(resp), client=self) # type: ignore - else: - raise ValueError("Endpoint should be one of the types 'sync', 'async', or 'streaming'") - - def list_model_bundles(self) -> List[ModelBundle]: - """ - Returns a list of model bundles that the user owns. - - Returns: - A list of ModelBundle objects - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.list_model_bundles_v1_model_bundles_get(skip_deserialization=True) - resp = json.loads(response.response.data) - model_bundles = [ModelBundle.from_dict(item) for item in resp["model_bundles"]] # type: ignore - return model_bundles - - def get_model_bundle(self, model_bundle: Union[ModelBundle, str]) -> ModelBundle: - """ - Returns a model bundle specified by ``bundle_name`` that the user owns. - - Parameters: - model_bundle: The bundle or its name. 
- - Returns: - A ``ModelBundle`` object - """ - bundle_name = _model_bundle_to_name(model_bundle) - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - query_params = frozendict({"model_name": bundle_name}) - response = api_instance.get_latest_model_bundle_v1_model_bundles_latest_get( # type: ignore - query_params=query_params, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - return ModelBundle.from_dict(resp) # type: ignore - - @deprecated(deprecated_in="1.0.0", details="Use create_model_bundle_from_callable_v2.") - def clone_model_bundle_with_changes( - self, - model_bundle: Union[ModelBundle, str], - app_config: Optional[Dict] = None, - ) -> ModelBundle: - """ - Warning: - This method is deprecated. Use - [`clone_model_bundle_with_changes_v2`](./#clone_model_bundle_with_changes_v2) instead. - - Parameters: - model_bundle: The existing bundle or its ID. - app_config: The new bundle's app config, if not passed in, the new - bundle's ``app_config`` will be set to ``None`` - - Returns: - A ``ModelBundle`` object - """ - - bundle_id = _model_bundle_to_id(model_bundle) - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - payload = dict_not_none( - original_model_bundle_id=bundle_id, - new_app_config=app_config, - ) - clone_model_bundle_request = CloneModelBundleV1Request(**payload) - response = ( - api_instance.clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( # noqa: E501 - body=clone_model_bundle_request, - skip_deserialization=True, - ) - ) - return json.loads(response.response.data) - - def list_model_endpoints(self) -> List[Endpoint]: - """ - Lists all model endpoints that the user owns. - - Returns: - A list of ``ModelEndpoint`` objects. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.list_model_endpoints_v1_model_endpoints_get(skip_deserialization=True) - resp = json.loads(response.response.data) - async_endpoints: List[Endpoint] = [ - AsyncEndpoint( - model_endpoint=ModelEndpoint.from_dict(endpoint), # type: ignore - client=self, - ) - for endpoint in resp["model_endpoints"] - if endpoint["endpoint_type"] == "async" - ] - sync_endpoints: List[Endpoint] = [ - SyncEndpoint( - model_endpoint=ModelEndpoint.from_dict(endpoint), # type: ignore - client=self, - ) - for endpoint in resp["model_endpoints"] - if endpoint["endpoint_type"] == "sync" - ] - streaming_endpoints: List[Endpoint] = [ - StreamingEndpoint( - model_endpoint=ModelEndpoint.from_dict(endpoint), # type: ignore - client=self, - ) - for endpoint in resp["model_endpoints"] - if endpoint["endpoint_type"] == "streaming" - ] - return async_endpoints + sync_endpoints + streaming_endpoints - - def delete_model_endpoint(self, model_endpoint_name: str): - """ - Deletes a model endpoint. - - Parameters: - model_endpoint: A ``ModelEndpoint`` object. - """ - endpoint = self.get_model_endpoint(model_endpoint_name) - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - model_endpoint_id = endpoint.model_endpoint.id # type: ignore - path_params = frozendict({"model_endpoint_id": model_endpoint_id}) - response = api_instance.delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - return resp["deleted"] - - def read_endpoint_creation_logs(self, model_endpoint: Union[ModelEndpoint, str]): - """ - Retrieves the logs for the creation of the endpoint. - - Parameters: - model_endpoint: The endpoint or its name. 
- """ - endpoint_name = _model_endpoint_to_name(model_endpoint) - route = f"{ENDPOINT_PATH}/creation_logs/{endpoint_name}" - resp = self.connection.get(route) - return resp["content"] - - def _streaming_request( - self, - endpoint_name: str, - url: Optional[str] = None, - args: Optional[Dict] = None, - return_pickled: bool = False, - extra_headers: Optional[Dict[str, str]] = None, - ) -> requests.Response: - """ - Not recommended for use, instead use functions provided by StreamingEndpoint. Makes a - request to the Sync Model Endpoint at endpoint_id, and blocks until request completion or - timeout. Endpoint at endpoint_id must be a SyncEndpoint, otherwise this request will fail. - - Parameters: - endpoint_name: The name of the endpoint to make the request to - - url: A url that points to a file containing model input. Must be accessible by Scale - Launch, hence it needs to either be public or a signedURL. **Note**: the contents of - the file located at ``url`` are opened as a sequence of ``bytes`` and passed to the - predict function. If you instead want to pass the url itself as an input to the - predict function, see ``args``. - - args: A dictionary of arguments to the ``predict`` function defined in your model - bundle. Must be json-serializable, i.e. composed of ``str``, ``int``, ``float``, - etc. If your ``predict`` function has signature ``predict(foo, bar)``, then args - should be a dictionary with keys ``foo`` and ``bar``. Exactly one of ``url`` and - ``args`` must be specified. - - return_pickled: Whether the python object returned is pickled, or directly written to - the file returned. - - Returns: - A requests.Response object. 
- """ - validate_task_request(url=url, args=args) - endpoint = self.get_model_endpoint(endpoint_name) - endpoint_id = endpoint.model_endpoint.id # type: ignore - payload = dict_not_none(return_pickled=return_pickled, url=url, args=args) - response = requests.post( - url=f"{self.configuration.host}/v1/streaming-tasks?model_endpoint_id={endpoint_id}", - json=payload, - auth=(self.configuration.username, self.configuration.password), - stream=True, - headers=extra_headers or {}, - ) - return response - - def _sync_request( - self, - endpoint_name: str, - url: Optional[str] = None, - args: Optional[Dict] = None, - return_pickled: bool = False, - extra_headers: Optional[Dict[str, str]] = None, - ) -> Dict[str, Any]: - """ - Not recommended for use, instead use functions provided by SyncEndpoint Makes a request - to the Sync Model Endpoint at endpoint_id, and blocks until request completion or - timeout. Endpoint at endpoint_id must be a SyncEndpoint, otherwise this request will fail. - - Parameters: - endpoint_name: The name of the endpoint to make the request to - - url: A url that points to a file containing model input. Must be accessible by Scale - Launch, hence it needs to either be public or a signedURL. **Note**: the contents of - the file located at ``url`` are opened as a sequence of ``bytes`` and passed to the - predict function. If you instead want to pass the url itself as an input to the - predict function, see ``args``. - - args: A dictionary of arguments to the ``predict`` function defined in your model - bundle. Must be json-serializable, i.e. composed of ``str``, ``int``, ``float``, - etc. If your ``predict`` function has signature ``predict(foo, bar)``, then args - should be a dictionary with keys ``foo`` and ``bar``. Exactly one of ``url`` and - ``args`` must be specified. - - return_pickled: Whether the python object returned is pickled, or directly written to - the file returned. 
- - Returns: - A dictionary with key either ``"result_url"`` or ``"result"``, depending on the value - of ``return_pickled``. If ``return_pickled`` is true, the key will be ``"result_url"``, - and the value is a signedUrl that contains a cloudpickled Python object, - the result of running inference on the model input. - Example output: - ``https://foo.s3.us-west-2.amazonaws.com/bar/baz/qux?xyzzy`` - - Otherwise, if ``return_pickled`` is false, the key will be ``"result"``, - and the value is the output of the endpoint's ``predict`` function, serialized as json. - """ - validate_task_request(url=url, args=args) - endpoint = self.get_model_endpoint(endpoint_name) - endpoint_id = endpoint.model_endpoint.id # type: ignore - with ApiClient(self.configuration) as api_client: - for key, value in (extra_headers or {}).items(): - api_client.set_default_header(key, value) - api_instance = DefaultApi(api_client) - payload = dict_not_none(return_pickled=return_pickled, url=url, args=args) - request = EndpointPredictV1Request(**payload) - query_params = frozendict({"model_endpoint_id": endpoint_id}) - response = api_instance.create_sync_inference_task_v1_sync_tasks_post( # type: ignore - body=request, - query_params=query_params, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - return resp - - def _async_request( - self, - endpoint_name: str, - *, - url: Optional[str] = None, - args: Optional[Dict] = None, - callback_url: Optional[str] = None, - callback_auth_kind: Optional[Literal["basic", "mtls"]] = None, - callback_auth_username: Optional[str] = None, - callback_auth_password: Optional[str] = None, - callback_auth_cert: Optional[str] = None, - callback_auth_key: Optional[str] = None, - return_pickled: bool = False, - extra_headers: Optional[Dict[str, str]] = None, - ) -> str: - """ - Makes a request to the Async Model Endpoint at endpoint_id, and immediately returns a key - that can be used to retrieve the result of inference at a later time. 
- - Parameters: - endpoint_name: The name of the endpoint to make the request to - - url: A url that points to a file containing model input. Must be accessible by Scale - Launch, hence it needs to either be public or a signedURL. **Note**: the contents of - the file located at ``url`` are opened as a sequence of ``bytes`` and passed to the - predict function. If you instead want to pass the url itself as an input to the - predict function, see ``args``. - - args: A dictionary of arguments to the ModelBundle's predict function. Must be - json-serializable, i.e. composed of ``str``, ``int``, ``float``, etc. If your predict - function has signature ``predict(foo, bar)``, then args should be a dictionary with - keys ``"foo"`` and ``"bar"``. - - Exactly one of ``url`` and ``args`` must be specified. - - callback_url: The callback url to use for this task. If None, then the - default_callback_url of the endpoint is used. The endpoint must specify - "callback" as a post-inference hook for the callback to be triggered. - - callback_auth_kind: The default callback auth kind to use for async endpoints. - Either "basic" or "mtls". This can be overridden in the task parameters for each - individual task. - - callback_auth_username: The default callback auth username to use. This only - applies if callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - callback_auth_password: The default callback auth password to use. This only - applies if callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - callback_auth_cert: The default callback auth cert to use. This only applies - if callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. - - callback_auth_key: The default callback auth key to use. This only applies - if callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. 
- - return_pickled: Whether the python object returned is pickled, or directly written to - the file returned. - - Returns: - An id/key that can be used to fetch inference results at a later time. - Example output: - `abcabcab-cabc-abca-0123456789ab` - """ - validate_task_request(url=url, args=args) - endpoint = self.get_model_endpoint(endpoint_name) - with ApiClient(self.configuration) as api_client: - for key, value in (extra_headers or {}).items(): - api_client.set_default_header(key, value) - api_instance = DefaultApi(api_client) - if callback_auth_kind is not None: - callback_auth = CallbackAuth( - **dict_not_none( - kind=callback_auth_kind, - username=callback_auth_username, - password=callback_auth_password, - cert=callback_auth_cert, - key=callback_auth_key, - ) - ) - else: - callback_auth = None - - payload = dict_not_none( - return_pickled=return_pickled, - url=url, - args=args, - callback_url=callback_url, - callback_auth=callback_auth, - ) - request = EndpointPredictV1Request(**payload) - model_endpoint_id = endpoint.model_endpoint.id # type: ignore - query_params = frozendict({"model_endpoint_id": model_endpoint_id}) - response = api_instance.create_async_inference_task_v1_async_tasks_post( # type: ignore - body=request, - query_params=query_params, # type: ignore - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - return resp - - def _get_async_endpoint_response(self, endpoint_name: str, async_task_id: str) -> Dict[str, Any]: - """ - Not recommended to use this, instead we recommend to use functions provided by - AsyncEndpoint. Gets inference results from a previously created task. - - Parameters: - endpoint_name: The name of the endpoint the request was made to. - async_task_id: The id/key returned from a previous invocation of async_request. - - Returns: A dictionary that contains task status and optionally a result url or result if - the task has completed. Result url or result will be returned if the task has succeeded. 
- Will return a result url iff ``return_pickled`` was set to ``True`` on task creation. - - The dictionary's keys are as follows: - - - ``status``: ``'PENDING'`` or ``'SUCCESS'`` or ``'FAILURE'`` - ``result_url``: a url - pointing to inference results. This url is accessible for 12 hours after the request - has been made. - ``result``: the value returned by the endpoint's `predict` function, - serialized as json - - Example output: - - .. code-block:: json - - { - 'status': 'SUCCESS', - 'result_url': 'https://foo.s3.us-west-2.amazonaws.com/bar/baz/qux?xyzzy' - } - - """ - # TODO: do we want to read the results from here as well? i.e. translate result_url into - # a python object - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"task_id": async_task_id}) - response = api_instance.get_async_inference_task_v1_async_tasks_task_id_get( # type: ignore - path_params=path_params, # type: ignore - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - return resp - - def batch_async_request( - self, - *, - model_bundle: Union[ModelBundle, str], - urls: Optional[List[str]] = None, - inputs: Optional[List[Dict[str, Any]]] = None, - batch_url_file_location: Optional[str] = None, - serialization_format: str = "JSON", - labels: Optional[Dict[str, str]] = None, - cpus: Optional[int] = None, - memory: Optional[str] = None, - gpus: Optional[int] = None, - gpu_type: Optional[str] = None, - storage: Optional[str] = None, - max_workers: Optional[int] = None, - per_worker: Optional[int] = None, - timeout_seconds: Optional[float] = None, - ) -> Dict[str, Any]: - """ - Sends a batch inference request using a given bundle. Returns a key that can be used to - retrieve the results of inference at a later time. - - Must have exactly one of urls or inputs passed in. - - Parameters: - model_bundle: The bundle or the name of a the bundle to use for inference. 
- - urls: A list of urls, each pointing to a file containing model input. Must be - accessible by Scale Launch, hence urls need to either be public or signedURLs. - - inputs: A list of model inputs, if exists, we will upload the inputs and pass it in - to Launch. - - batch_url_file_location: In self-hosted mode, the input to the batch job will be - uploaded to this location if provided. Otherwise, one will be determined from - bundle_location_fn() - - serialization_format: Serialization format of output, either 'PICKLE' or 'JSON'. - 'pickle' corresponds to pickling results + returning - - labels: An optional dictionary of key/value pairs to associate with this endpoint. - - cpus: Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater than - or equal to 1. - - memory: Amount of memory each worker should get, e.g. "4Gi", "512Mi", etc. This must be - a positive amount of memory. - - storage: Amount of local ephemeral storage each worker should get, e.g. "4Gi", "512Mi", - etc. This must be a positive amount of storage. - - gpus: Number of gpus each worker should get, e.g. 0, 1, etc. - - max_workers: The maximum number of workers. Must be greater than or equal to 0, and as - well as greater than or equal to ``min_workers``. - - per_worker: The maximum number of concurrent requests that an individual worker can - service. Launch automatically scales the number of workers for the endpoint so that - each worker is processing ``per_worker`` requests: - - - If the average number of concurrent requests per worker is lower than - ``per_worker``, then the number of workers will be reduced. - - Otherwise, if the average number of concurrent requests per worker is higher - than ``per_worker``, then the number of workers will be increased to meet the - elevated traffic. - - gpu_type: If specifying a non-zero number of gpus, this controls the type of gpu - requested. 
Here are the supported values: - - - ``nvidia-tesla-t4`` - - ``nvidia-ampere-a10`` - - ``nvidia-hopper-h100`` - - ``nvidia-hopper-h100-1g20g`` - - ``nvidia-hopper-h100-3g40g`` - - timeout_seconds: The maximum amount of time (in seconds) that the batch job can take. - If not specified, the server defaults to 12 hours. This includes the time required - to build the endpoint and the total time required for all the individual tasks. - - Returns: - A dictionary that contains `job_id` as a key, and the ID as the value. - """ - - if not bool(inputs) ^ bool(urls): - raise ValueError("Exactly one of inputs and urls is required for batch tasks") - - f = StringIO() - if urls: - make_batch_input_file(urls, f) - elif inputs: - make_batch_input_dict_file(inputs, f) - f.seek(0) - - if self.self_hosted: - # TODO make this not use bundle_location_fn() - location_fn = self.batch_csv_location_fn or self.bundle_location_fn - if location_fn is None and batch_url_file_location is None: - raise ValueError("Must register batch_csv_location_fn if csv file location not passed in") - file_location = batch_url_file_location or location_fn() # type: ignore - self.upload_batch_csv_fn(f.getvalue(), file_location) # type: ignore - else: - model_bundle_s3_url = self.connection.post({}, BATCH_TASK_INPUT_SIGNED_URL_PATH) - s3_path = model_bundle_s3_url["signedUrl"] - requests.put(s3_path, data=f.getvalue()) - file_location = f"s3://{model_bundle_s3_url['bucket']}/{model_bundle_s3_url['key']}" - - logger.info("Writing batch task csv to %s", file_location) - - if not isinstance(model_bundle, ModelBundle) or model_bundle.id is None: - model_bundle = self.get_model_bundle(model_bundle) - - resource_requests = dict_not_none( - cpus=cpus, - memory=memory, - gpus=gpus, - gpu_type=gpu_type, - storage=storage, - max_workers=max_workers, - per_worker=per_worker, - ) - payload = dict_not_none( - model_bundle_id=model_bundle.id, - input_path=file_location, - serialization_format=serialization_format, - 
labels=labels, - resource_requests=resource_requests, - timeout_seconds=timeout_seconds, - ) - request = CreateBatchJobV1Request(**payload) - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.create_batch_job_v1_batch_jobs_post( # type: ignore - body=request, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - return resp - - def get_batch_async_response(self, batch_job_id: str) -> Dict[str, Any]: - """ - Gets inference results from a previously created batch job. - - Parameters: - batch_job_id: An id representing the batch task job. This id is the in the response from - calling ``batch_async_request``. - - Returns: - A dictionary that contains the following fields: - - - ``status``: The status of the job. - - ``result``: The url where the result is stored. - - ``duration``: A string representation of how long the job took to finish - or how long it has been running, for a job current in progress. - - ``num_tasks_pending``: The number of tasks that are still pending. - - ``num_tasks_completed``: The number of tasks that have completed. - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"batch_job_id": batch_job_id}) - response = api_instance.get_batch_job_v1_batch_jobs_batch_job_id_get( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - return resp - - def create_docker_image_batch_job_bundle( - self, - *, - name: str, - image_repository: str, - image_tag: str, - command: List[str], - env: Optional[Dict[str, str]] = None, - mount_location: Optional[str] = None, - cpus: Optional[int] = None, - memory: Optional[str] = None, - gpus: Optional[int] = None, - gpu_type: Optional[str] = None, - storage: Optional[str] = None, - ) -> CreateDockerImageBatchJobBundleResponse: - """ - For self hosted mode only. 
- - Creates a Docker Image Batch Job Bundle. - - Parameters: - name: - A user-defined name for the bundle. Does not need to be unique. - image_repository: - The (short) repository of your image. For example, if your image is located at - 123456789012.dkr.ecr.us-west-2.amazonaws.com/repo:tag, and your version of Launch - is configured to look at 123456789012.dkr.ecr.us-west-2.amazonaws.com for Docker Images, - you would pass the value `repo` for the `image_repository` parameter. - image_tag: - The tag of your image inside of the repo. In the example above, you would pass - the value `tag` for the `image_tag` parameter. - command: - The command to run inside the docker image. - env: - A dictionary of environment variables to inject into your docker image. - mount_location: - A location in the filesystem where you would like a json-formatted file, controllable - on runtime, to be mounted. This allows behavior to be specified on runtime. - (Specifically, the contents of this file can be read via `json.load()` inside of the - user-defined code.) - cpus: - Optional default value for the number of cpus to give the job. - memory: - Optional default value for the amount of memory to give the job. - gpus: - Optional default value for the number of gpus to give the job. - gpu_type: - Optional default value for the type of gpu to give the job. - storage: - Optional default value for the amount of disk to give the job. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - # Raw dictionary since it's not the entire request and we can get away with this - # Also having it be a CreateDockerImageBatchJobResourceRequest runs into problems - # if no values are specified - resource_requests = dict_not_none( - cpus=cpus, - memory=memory, - gpus=gpus, - gpu_type=gpu_type, - storage=storage, - ) - create_docker_image_batch_job_bundle_request = CreateDockerImageBatchJobBundleV1Request( - **dict_not_none( - name=name, - image_repository=image_repository, - image_tag=image_tag, - command=command, - env=env, - mount_location=mount_location, - resource_requests=resource_requests, - ) - ) - response = api_instance.create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - body=create_docker_image_batch_job_bundle_request, skip_deserialization=True - ) - resp = CreateDockerImageBatchJobBundleResponse.parse_raw(response.response.data) - return resp - - def get_docker_image_batch_job_bundle( - self, docker_image_batch_job_bundle_id: str - ) -> DockerImageBatchJobBundleResponse: - """ - For self hosted mode only. Gets information for a single batch job bundle with a given id. - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id}) - response = api_instance.get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( # type: ignore # noqa: E501 - path_params=path_params, - skip_deserialization=True, - ) - resp = DockerImageBatchJobBundleResponse.parse_raw(response.response.data) - - return resp - - def get_latest_docker_image_batch_job_bundle(self, bundle_name: str) -> DockerImageBatchJobBundleResponse: - """ - For self hosted mode only. Gets information for the latest batch job bundle with a given name. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - query_params = frozendict({"bundle_name": bundle_name}) - response = api_instance.get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( # type: ignore # noqa: E501 - query_params=query_params, - skip_deserialization=True, - ) - resp = DockerImageBatchJobBundleResponse.parse_raw(response.response.data) - - return resp - - def list_docker_image_batch_job_bundles( - self, bundle_name: Optional[str] = None, order_by: Optional[Literal["newest", "oldest"]] = None - ) -> ListDockerImageBatchJobBundleResponse: - """ - For self hosted mode only. Gets information for multiple bundles. - - Parameters: - bundle_name: The name of the bundles to retrieve. If not specified, this will retrieve all - bundles. - order_by: Either "newest", "oldest", or not specified. Specify to sort by newest/oldest. - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - query_params = frozendict(dict_not_none(bundle_name=bundle_name, order_by=order_by)) - response = api_instance.list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( # type: ignore # noqa: E501 - query_params=query_params, - skip_deserialization=True, - ) - resp = ListDockerImageBatchJobBundleResponse.parse_raw(response.response.data) - - return resp - - def create_docker_image_batch_job( - self, - *, - labels: Dict[str, str], - docker_image_batch_job_bundle: Optional[Union[str, DockerImageBatchJobBundleResponse]] = None, - docker_image_batch_job_bundle_name: Optional[str] = None, - job_config: Optional[Dict[str, Any]] = None, - cpus: Optional[int] = None, - memory: Optional[str] = None, - gpus: Optional[int] = None, - gpu_type: Optional[str] = None, - storage: Optional[str] = None, - ): - """ - For self hosted mode only. - Parameters: - docker_image_batch_job_bundle: Specifies the docker image bundle to use for the batch job. 
- Either the string id of a docker image bundle, or a - DockerImageBatchJobBundleResponse object. - Only one of docker_image_batch_job_bundle and docker_image_batch_job_bundle_name - can be specified. - docker_image_batch_job_bundle_name: The name of a batch job bundle. If specified, - Launch will use the most recent bundle with that name owned by the current user. - Only one of docker_image_batch_job_bundle and docker_image_batch_job_bundle_name - can be specified. - labels: Kubernetes labels that are present on the batch job. - job_config: A JSON-serializable python object that will get passed to the batch job, - specifically as the contents of a file mounted at `mount_location` inside the bundle. - You can call python's `json.load()` on the file to retrieve the contents. - cpus: Optional override for the number of cpus to give to your job. Either the default - must be specified in the bundle, or this must be specified. - memory: Optional override for the amount of memory to give to your job. Either the default - must be specified in the bundle, or this must be specified. - gpus: Optional number of gpus to give to the bundle. If not specified in the bundle or - here, will be interpreted as 0 gpus. - gpu_type: Optional type of gpu. If the final number of gpus is positive, must be specified - either in the bundle or here. - storage: Optional reserved amount of disk to give to your batch job. If not specified, - your job may be evicted if it is using too much disk. 
- """ - - assert (docker_image_batch_job_bundle is None) ^ ( - docker_image_batch_job_bundle_name is None - ), "Exactly one of docker_image_batch_job_bundle and docker_image_batch_job_bundle_name must be specified" - - if docker_image_batch_job_bundle is not None and isinstance( - docker_image_batch_job_bundle, DockerImageBatchJobBundleResponse - ): - docker_image_batch_job_bundle_id: Optional[str] = docker_image_batch_job_bundle.id - else: - docker_image_batch_job_bundle_id = docker_image_batch_job_bundle - - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - # Raw dictionary since it's not the entire request and we can get away with this - # Also having it be a CreateDockerImageBatchJobResourceRequest runs into problems - # if no values are specified - resource_requests = dict_not_none( - cpus=cpus, - memory=memory, - gpus=gpus, - gpu_type=gpu_type, - storage=storage, - ) - create_docker_image_batch_job_request = CreateDockerImageBatchJobV1Request( - **dict_not_none( - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - docker_image_batch_job_bundle_name=docker_image_batch_job_bundle_name, - job_config=job_config, - labels=labels, - resource_requests=resource_requests, - ) - ) - response = api_instance.create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - body=create_docker_image_batch_job_request, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - return resp - - def get_docker_image_batch_job(self, batch_job_id: str): - """ - For self hosted mode only. Gets information about a batch job given a batch job id. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"batch_job_id": batch_job_id}) - response = ( - api_instance.get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - ) - resp = json.loads(response.response.data) - - return resp - - def update_docker_image_batch_job(self, batch_job_id: str, cancel: bool): - """ - For self hosted mode only. Updates a batch job by id. - Use this if you want to cancel/delete a batch job. - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"batch_job_id": batch_job_id}) - body = UpdateDockerImageBatchJobV1Request(cancel=cancel) - response = api_instance.update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( # type: ignore # noqa: E501 - body=body, - path_params=path_params, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - - return resp - - def create_llm_model_endpoint( - self, - endpoint_name: str, - # LLM specific fields - model_name: str, - inference_framework_image_tag: str, - source: LLMSource = LLMSource.HUGGING_FACE, - inference_framework: LLMInferenceFramework = LLMInferenceFramework.DEEPSPEED, - num_shards: int = 4, - quantize: Optional[Quantization] = None, - checkpoint_path: Optional[str] = None, - # General endpoint fields - cpus: int = 32, - memory: str = "192Gi", - storage: Optional[str] = None, - gpus: int = 4, - min_workers: int = 0, - max_workers: int = 1, - per_worker: int = 10, - gpu_type: Optional[str] = "nvidia-ampere-a10", - endpoint_type: str = "sync", - high_priority: Optional[bool] = False, - post_inference_hooks: Optional[List[PostInferenceHooks]] = None, - default_callback_url: Optional[str] = None, - default_callback_auth_kind: Optional[Literal["basic", "mtls"]] = None, - default_callback_auth_username: Optional[str] = 
None, - default_callback_auth_password: Optional[str] = None, - default_callback_auth_cert: Optional[str] = None, - default_callback_auth_key: Optional[str] = None, - public_inference: Optional[bool] = None, - update_if_exists: bool = False, - labels: Optional[Dict[str, str]] = None, - ): - """ - Creates and registers a model endpoint in Scale Launch. The returned object is an - instance of type ``Endpoint``, which is a base class of either ``SyncEndpoint`` or - ``AsyncEndpoint``. This is the object to which you sent inference requests. - - Parameters: - endpoint_name: The name of the model endpoint you want to create. The name - must be unique across all endpoints that you own. - - model_name: name for the LLM. List can be found at - (TODO: add list of supported models) - - inference_framework_image_tag: image tag for the inference framework. - (TODO: use latest image tag when unspecified) - - source: source of the LLM. Currently only HuggingFace is supported. - - inference_framework: inference framework for the LLM. Currently only DeepSpeed is supported. - - num_shards: number of shards for the LLM. When bigger than 1, LLM will be sharded - to multiple GPUs. Number of GPUs must be larger than num_shards. - - quantize: Quantization method for the LLM. Only affects behavior for text-generation-inference models. - - checkpoint_path: Path to the checkpoint to load the model from. - Only affects behavior for text-generation-inference models. - - cpus: Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater - than or equal to 1. - - memory: Amount of memory each worker should get, e.g. "4Gi", "512Mi", etc. This must - be a positive amount of memory. - - storage: Amount of local ephemeral storage each worker should get, e.g. "4Gi", - "512Mi", etc. This must be a positive amount of storage. - - gpus: Number of gpus each worker should get, e.g. 0, 1, etc. - - min_workers: The minimum number of workers. Must be greater than or equal to 0. 
This - should be determined by computing the minimum throughput of your workload and - dividing it by the throughput of a single worker. This field must be at least ``1`` - for synchronous endpoints. - - max_workers: The maximum number of workers. Must be greater than or equal to 0, - and as well as greater than or equal to ``min_workers``. This should be determined by - computing the maximum throughput of your workload and dividing it by the throughput - of a single worker. - - per_worker: The maximum number of concurrent requests that an individual worker can - service. Launch automatically scales the number of workers for the endpoint so that - each worker is processing ``per_worker`` requests, subject to the limits defined by - ``min_workers`` and ``max_workers``. - - - If the average number of concurrent requests per worker is lower than - ``per_worker``, then the number of workers will be reduced. - Otherwise, - if the average number of concurrent requests per worker is higher than - ``per_worker``, then the number of workers will be increased to meet the elevated - traffic. - - Here is our recommendation for computing ``per_worker``: - - 1. Compute ``min_workers`` and ``max_workers`` per your minimum and maximum - throughput requirements. 2. Determine a value for the maximum number of - concurrent requests in the workload. Divide this number by ``max_workers``. Doing - this ensures that the number of workers will "climb" to ``max_workers``. - - gpu_type: If specifying a non-zero number of gpus, this controls the type of gpu - requested. Here are the supported values: - - - ``nvidia-tesla-t4`` - - ``nvidia-ampere-a10`` - - ``nvidia-hopper-h100`` - - ``nvidia-hopper-h100-1g20g`` - - ``nvidia-hopper-h100-3g40g`` - - endpoint_type: Either ``"sync"`` or ``"async"``. - - high_priority: Either ``True`` or ``False``. Enabling this will allow the created - endpoint to leverage the shared pool of prewarmed nodes for faster spinup time. 
- - post_inference_hooks: List of hooks to trigger after inference tasks are served. - - default_callback_url: The default callback url to use for async endpoints. - This can be overridden in the task parameters for each individual task. - post_inference_hooks must contain "callback" for the callback to be triggered. - - default_callback_auth_kind: The default callback auth kind to use for async endpoints. - Either "basic" or "mtls". This can be overridden in the task parameters for each - individual task. - - default_callback_auth_username: The default callback auth username to use. This only - applies if default_callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - default_callback_auth_password: The default callback auth password to use. This only - applies if default_callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - default_callback_auth_cert: The default callback auth cert to use. This only applies - if default_callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. - - default_callback_auth_key: The default callback auth key to use. This only applies - if default_callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. - - public_inference: If ``True``, this endpoint will be available to all user IDs for - inference. - - update_if_exists: If ``True``, will attempt to update the endpoint if it exists. - Otherwise, will unconditionally try to create a new endpoint. Note that endpoint - names for a given user must be unique, so attempting to call this function with - ``update_if_exists=False`` for an existing endpoint will raise an error. - - labels: An optional dictionary of key/value pairs to associate with this endpoint. - - Returns: - A Endpoint object that can be used to make requests to the endpoint. 
- - """ - existing_endpoint = self.get_model_endpoint(endpoint_name) - if update_if_exists and existing_endpoint: - self.edit_model_endpoint( - model_endpoint=endpoint_name, - model_bundle=existing_endpoint.model_endpoint.bundle_name, - cpus=cpus, - memory=memory, - storage=storage, - gpus=gpus, - min_workers=min_workers, - max_workers=max_workers, - per_worker=per_worker, - gpu_type=gpu_type, - high_priority=high_priority, - default_callback_url=default_callback_url, - default_callback_auth_kind=default_callback_auth_kind, - default_callback_auth_username=default_callback_auth_username, - default_callback_auth_password=default_callback_auth_password, - default_callback_auth_cert=default_callback_auth_cert, - default_callback_auth_key=default_callback_auth_key, - public_inference=public_inference, - ) - return existing_endpoint - else: - # Presumably, the user knows that the endpoint doesn't already exist, and so we can - # defer to the server to reject any duplicate creations. - logger.info("Creating new LLM endpoint") - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - post_inference_hooks_strs = None - if post_inference_hooks is not None: - post_inference_hooks_strs = [] - for hook in post_inference_hooks: - if isinstance(hook, PostInferenceHooks): - post_inference_hooks_strs.append(hook.value) - else: - post_inference_hooks_strs.append(hook) - - if default_callback_auth_kind is not None: - default_callback_auth = CallbackAuth( - **dict_not_none( - kind=default_callback_auth_kind, - username=default_callback_auth_username, - password=default_callback_auth_password, - cert=default_callback_auth_cert, - key=default_callback_auth_key, - ) - ) - else: - default_callback_auth = None - - payload = dict_not_none( - name=endpoint_name, - model_name=model_name, - source=source, - inference_framework=inference_framework, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - quantize=quantize, - 
checkpoint_path=checkpoint_path, - cpus=cpus, - endpoint_type=ModelEndpointType(endpoint_type), - gpus=gpus, - gpu_type=GpuType(gpu_type) if gpu_type is not None else None, - labels=labels or {}, - max_workers=max_workers, - memory=memory, - metadata={}, - min_workers=min_workers, - per_worker=per_worker, - high_priority=high_priority, - post_inference_hooks=post_inference_hooks_strs, - default_callback_url=default_callback_url, - default_callback_auth=default_callback_auth, - storage=storage, - public_inference=public_inference, - ) - create_model_endpoint_request = CreateLLMModelEndpointV1Request(**payload) - response = api_instance.create_model_endpoint_v1_llm_model_endpoints_post( - body=create_model_endpoint_request, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - endpoint_creation_task_id = resp.get("endpoint_creation_task_id", None) # TODO probably throw on None - logger.info("Endpoint creation task id is %s", endpoint_creation_task_id) - model_endpoint = ModelEndpoint( - name=endpoint_name, bundle_name=f"{endpoint_name}-{str(inference_framework)}" - ) - if endpoint_type == "async": - return AsyncEndpoint(model_endpoint=model_endpoint, client=self) - elif endpoint_type == "sync": - return SyncEndpoint(model_endpoint=model_endpoint, client=self) - elif endpoint_type == "streaming": - return StreamingEndpoint(model_endpoint=model_endpoint, client=self) - else: - raise ValueError("Endpoint should be one of the types 'sync', 'async', or 'streaming'") - - def list_llm_model_endpoints(self) -> List[Endpoint]: - """ - Lists all LLM model endpoints that the user has access to. - - Returns: - A list of ``ModelEndpoint`` objects. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.list_model_endpoints_v1_llm_model_endpoints_get(skip_deserialization=True) - resp = json.loads(response.response.data) - async_endpoints: List[Endpoint] = [ - AsyncEndpoint( - model_endpoint=ModelEndpoint.from_dict(endpoint), # type: ignore - client=self, - ) - for endpoint in resp["model_endpoints"] - if endpoint["spec"]["endpoint_type"] == "async" - ] - sync_endpoints: List[Endpoint] = [ - SyncEndpoint( - model_endpoint=ModelEndpoint.from_dict(endpoint), # type: ignore - client=self, - ) - for endpoint in resp["model_endpoints"] - if endpoint["spec"]["endpoint_type"] == "sync" - ] - streaming_endpoints: List[Endpoint] = [ - StreamingEndpoint( - model_endpoint=ModelEndpoint.from_dict(endpoint), # type: ignore - client=self, - ) - for endpoint in resp["model_endpoints"] - if endpoint["spec"]["endpoint_type"] == "streaming" - ] - return async_endpoints + sync_endpoints + streaming_endpoints - - def get_llm_model_endpoint( - self, endpoint_name: str - ) -> Optional[Union[AsyncEndpoint, SyncEndpoint, StreamingEndpoint]]: - """ - Gets a model endpoint associated with a name that the user has access to. - - Parameters: - endpoint_name: The name of the endpoint to retrieve. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"model_endpoint_name": endpoint_name}) - response = api_instance.get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - - if resp["spec"]["endpoint_type"] == "async": - return AsyncEndpoint(ModelEndpoint.from_dict(resp), client=self) # type: ignore - elif resp["spec"]["endpoint_type"] == "sync": - return SyncEndpoint(ModelEndpoint.from_dict(resp), client=self) # type: ignore - elif resp["spec"]["endpoint_type"] == "streaming": - return StreamingEndpoint(ModelEndpoint.from_dict(resp), client=self) # type: ignore - else: - raise ValueError("Endpoint should be one of the types 'sync', 'async', or 'streaming'") - - def completions_sync( - self, - endpoint_name: str, - prompt: str, - max_new_tokens: int, - temperature: float, - stop_sequences: Optional[List[str]] = None, - return_token_log_probs: Optional[bool] = False, - timeout: float = DEFAULT_LLM_COMPLETIONS_TIMEOUT, - ) -> CompletionSyncV1Response: - """ - Run prompt completion on a sync LLM endpoint. Will fail if the endpoint is not sync. 
- - Parameters: - endpoint_name: The name of the LLM endpoint to make the request to - - prompt: The completion prompt to send to the endpoint - - max_new_tokens: The maximum number of tokens to generate for each prompt - - temperature: The temperature to use for sampling - - stop_sequences: List of sequences to stop the completion at - - return_token_log_probs: Whether to return the log probabilities of the tokens - - Returns: - Response for prompt completion - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - request = CompletionSyncV1Request( - max_new_tokens=max_new_tokens, - prompt=prompt, - temperature=temperature, - stop_sequences=stop_sequences if stop_sequences is not None else [], - return_token_log_probs=return_token_log_probs, - ) - query_params = frozendict({"model_endpoint_name": endpoint_name}) - response = api_instance.create_completion_sync_task_v1_llm_completions_sync_post( # type: ignore - body=request, - query_params=query_params, - skip_deserialization=True, - timeout=timeout, - ) - resp = json.loads(response.response.data) - return resp - - def completions_stream( - self, - endpoint_name: str, - prompt: str, - max_new_tokens: int, - temperature: float, - stop_sequences: Optional[List[str]] = None, - return_token_log_probs: Optional[bool] = False, - timeout: float = DEFAULT_LLM_COMPLETIONS_TIMEOUT, - ) -> Iterable[CompletionStreamV1Response]: - """ - Run prompt completion on an LLM endpoint in streaming fashion. Will fail if endpoint does not support streaming. 
- - Parameters: - endpoint_name: The name of the LLM endpoint to make the request to - - prompt: The prompt to send to the endpoint - - max_new_tokens: The maximum number of tokens to generate for each prompt - - temperature: The temperature to use for sampling - - stop_sequences: List of sequences to stop the completion at - - return_token_log_probs: Whether to return the log probabilities of the tokens - - Returns: - Iterable responses for prompt completion - """ - request = { - "max_new_tokens": max_new_tokens, - "prompt": prompt, - "temperature": temperature, - "stop_sequences": stop_sequences, - "return_token_log_probs": return_token_log_probs, - } - response = requests.post( - url=f"{self.configuration.host}/v1/llm/completions-stream?model_endpoint_name={endpoint_name}", - json=request, - auth=(self.configuration.username, self.configuration.password), - stream=True, - timeout=timeout, - ) - sse_client = sseclient.SSEClient(response) # type: ignore - events = sse_client.events() - for event in events: - yield json.loads(event.data) - - def create_fine_tune( - self, - model: str, - training_file: str, - validation_file: Optional[str] = None, - fine_tuning_method: Optional[str] = None, - hyperparameters: Optional[Dict[str, str]] = None, - wandb_config: Optional[Dict[str, Any]] = None, - suffix: Optional[str] = None, - ) -> CreateFineTuneResponse: - """ - Create a fine-tune - - Parameters: - model: Identifier of base model to train from. - training_file: Path to file of training dataset. - Dataset must be a csv with columns 'prompt' and 'response'. - validation_file: Path to file of validation dataset. - Has the same format as training_file. If not provided, we will generate a split - from the training dataset. - fine_tuning_method: Fine-tuning method. Currently unused, - but when different techniques are implemented we will expose this field. - hyperparameters: Hyperparameters to pass in to training job. - wandb_config: Configuration for Weights and Biases. 
- To enable set `hyperparameters["report_to"]` to `wandb`. - `api_key` must be provided which is the API key. - suffix: Optional user-provided identifier suffix for the fine-tuned model. - - Returns: - CreateFineTuneResponse: ID of the created fine-tune - """ - if hyperparameters is None: - hyperparameters = {} - create_fine_tune_request = CreateFineTuneRequest( - **dict_not_none( - model=model, - training_file=training_file, - validation_file=validation_file, - fine_tuning_method=fine_tuning_method, - hyperparameters=hyperparameters, - wandb_config=wandb_config, - suffix=suffix, - ) - ) - - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.create_fine_tune_v1_llm_fine_tunes_post( - body=create_fine_tune_request, - skip_deserialization=True, - ) - resp = CreateFineTuneResponse.parse_raw(response.response.data) - - return resp - - def get_fine_tune( - self, - fine_tune_id: str, - ) -> GetFineTuneResponse: - """ - Get status of a fine-tune - - Parameters: - fine_tune_id: ID of the fine-tune - - Returns: - GetFineTuneResponse: ID and status of the requested fine-tune - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"fine_tune_id": fine_tune_id}) - response = api_instance.get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = GetFineTuneResponse.parse_raw(response.response.data) - - return resp - - def list_fine_tunes( - self, - ) -> ListFineTunesResponse: - """ - List fine-tunes - - Returns: - ListFineTunesResponse: list of all fine-tunes and their statuses - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.list_fine_tunes_v1_llm_fine_tunes_get( # type: ignore - skip_deserialization=True, - ) - resp = ListFineTunesResponse.parse_raw(response.response.data) - - return resp - - 
def cancel_fine_tune( - self, - fine_tune_id: str, - ) -> CancelFineTuneResponse: - """ - Cancel a fine-tune - - Parameters: - fine_tune_id: ID of the fine-tune - - Returns: - CancelFineTuneResponse: whether the cancellation was successful - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"fine_tune_id": fine_tune_id}) - response = api_instance.cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = CancelFineTuneResponse.parse_raw(response.response.data) - - return resp - - def get_fine_tune_events(self, fine_tune_id: str) -> GetFineTuneEventsResponse: - """ - Get list of fine-tune events - - Parameters: - fine_tune_id: ID of the fine-tune - - Returns: - GetFineTuneEventsResponse: a list of all the events of the fine-tune - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"fine_tune_id": fine_tune_id}) - response = api_instance.get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = GetFineTuneEventsResponse.parse_raw(response.response.data) - return resp - - def upload_file( - self, - file_path: str, - ) -> UploadFileResponse: - """ - Upload a file - - Parameters: - file_path: Path to a local file to upload. 
- - Returns: - UploadFileResponse: ID of the created file - """ - with open(file_path, "rb") as file: - body = BodyUploadFileV1FilesPost(file=file) - - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.upload_file_v1_files_post( - body=body, - skip_deserialization=True, - ) - resp = UploadFileResponse.parse_raw(response.response.data) - - return resp - - def get_file( - self, - file_id: str, - ) -> GetFileResponse: - """ - Get metadata about a file - - Parameters: - file_id: ID of the file - - Returns: - GetFileResponse: ID, filename, and size of the requested file - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"file_id": file_id}) - response = api_instance.get_file_v1_files_file_id_get( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = GetFileResponse.parse_raw(response.response.data) - - return resp - - def list_files( - self, - ) -> ListFilesResponse: - """ - List files - - Returns: - ListFilesResponse: list of all files (ID, filename, and size) - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - response = api_instance.list_files_v1_files_get( # type: ignore - skip_deserialization=True, - ) - resp = ListFilesResponse.parse_raw(response.response.data) - - return resp - - def delete_file( - self, - file_id: str, - ) -> DeleteFileResponse: - """ - Delete a file - - Parameters: - file_id: ID of the file - - Returns: - DeleteFileResponse: whether the deletion was successful - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"file_id": file_id}) - response = api_instance.delete_file_v1_files_file_id_delete( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = DeleteFileResponse.parse_raw(response.response.data) - - return resp - - def 
get_file_content( - self, - file_id: str, - ) -> GetFileContentResponse: - """ - Get a file's content - - Parameters: - file_id: ID of the file - - Returns: - GetFileContentResponse: ID and content of the requested file - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"file_id": file_id}) - response = api_instance.get_file_content_v1_files_file_id_content_get( # type: ignore - path_params=path_params, - skip_deserialization=True, - ) - resp = GetFileContentResponse.parse_raw(response.response.data) - - return resp - - def model_download(self, model_name: str, download_format: str = "hugging_face") -> ModelDownloadResponse: - """ - download a finetuned model - - Parameters: - model_name: name of the model to download - download_format: format of the model to download - - Returns: - ModelDownloadResponse: dictionary with file names and urls to download the model - """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - request_body = {"model_name": model_name, "download_format": download_format} - response = api_instance.download_model_endpoint_v1_llm_model_endpoints_download_post( # type: ignore - body=request_body, - skip_deserialization=True, - ) - resp = ModelDownloadResponse.parse_raw(response.response.data) - - return resp - - def delete_llm_model_endpoint(self, model_endpoint_name: str) -> bool: - """ - Deletes an LLM model endpoint. - - Parameters: - model_endpoint_name: The name of the model endpoint to delete. 
- """ - with ApiClient(self.configuration) as api_client: - api_instance = DefaultApi(api_client) - path_params = frozendict({"model_endpoint_name": model_endpoint_name}) - response = api_instance.delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( # type: ignore # noqa: E501 - path_params=path_params, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - return resp["deleted"] - - -def _zip_directory(zipf: ZipFile, path: str) -> None: - for root, _, files in os.walk(path): - for file_ in files: - zipf.write( - filename=os.path.join(root, file_), - arcname=os.path.relpath(os.path.join(root, file_), os.path.join(path, "..")), - ) - - -def _zip_directories(zip_path: str, dir_list: List[str]) -> None: - with ZipFile(zip_path, "w") as zip_f: - for dir_ in dir_list: - _zip_directory(zip_f, dir_) diff --git a/launch/connection.py b/launch/connection.py deleted file mode 100644 index f44e0aa9..00000000 --- a/launch/connection.py +++ /dev/null @@ -1,97 +0,0 @@ -import time -from typing import Optional - -import requests - -from launch.constants import DEFAULT_NETWORK_TIMEOUT_SEC -from launch.errors import APIError -from launch.logger import logger -from launch.retry_strategy import RetryStrategy - - -class Connection: - """Wrapper of HTTP requests to the Launch endpoint.""" - - def __init__(self, api_key: str, endpoint: Optional[str] = None): - self.api_key = api_key - self.endpoint = endpoint - - def __repr__(self): - return f"Connection(api_key='{self.api_key}', endpoint='{self.endpoint}')" - - def __eq__(self, other): - return self.api_key == other.api_key and self.endpoint == other.endpoint - - def delete(self, route: str): - return self.make_request( - {}, - route, - requests_command=requests.delete, - ) - - def get(self, route: str): - return self.make_request( - {}, - route, - requests_command=requests.get, - ) - - def post(self, payload: dict, route: str): - return self.make_request( - payload, - route, - 
requests_command=requests.post, - ) - - def put(self, payload: dict, route: str): - return self.make_request( - payload, - route, - requests_command=requests.put, - ) - - def make_request( - self, - payload: dict, - route: str, - requests_command=requests.post, - ) -> dict: - """ - Makes a request to Launch endpoint and logs a warning if not - successful. - - :param payload: given payload - :param route: route for the request - :param requests_command: requests.post, requests.get, requests.delete - :return: response JSON - """ - endpoint = f"{self.endpoint}/{route}" - - logger.info("Make request to %s", endpoint) - - for retry_wait_time in RetryStrategy.sleep_times: - response = requests_command( - endpoint, - json=payload, - headers={"Content-Type": "application/json"}, - auth=(self.api_key, ""), - timeout=DEFAULT_NETWORK_TIMEOUT_SEC, - ) - logger.info("API request has response code %s", response.status_code) - if response.status_code not in RetryStrategy.statuses: - break - time.sleep(retry_wait_time) - - if not response.ok: - self.handle_bad_response(endpoint, requests_command, response) - - return response.json() - - def handle_bad_response( - self, - endpoint, - requests_command, - requests_response=None, - aiohttp_response=None, - ): - raise APIError(endpoint, requests_command, requests_response, aiohttp_response) diff --git a/launch/constants.py b/launch/constants.py deleted file mode 100644 index 30fd2bf2..00000000 --- a/launch/constants.py +++ /dev/null @@ -1,14 +0,0 @@ -ENDPOINT_PATH = "endpoints" -MODEL_BUNDLE_SIGNED_URL_PATH = "model_bundle_upload" -BATCH_TASK_INPUT_SIGNED_URL_PATH = "batch_task_input_upload" -ASYNC_TASK_PATH = "task_async" -ASYNC_TASK_RESULT_PATH = "task/result" -SYNC_TASK_PATH = "task_sync" -BATCH_TASK_PATH = "batch_job" -BATCH_TASK_RESULTS_PATH = "batch_job" -RESULT_PATH = "result" -DEFAULT_SCALE_ENDPOINT = "https://api.scale.com" -SCALE_LAUNCH_V0_PATH = "/v1/hosted_inference" -SCALE_LAUNCH_V1_PATH = "/v1/launch" - 
-DEFAULT_NETWORK_TIMEOUT_SEC = 120 diff --git a/launch/docker_image_batch_job_bundle.py b/launch/docker_image_batch_job_bundle.py deleted file mode 100644 index 8acff240..00000000 --- a/launch/docker_image_batch_job_bundle.py +++ /dev/null @@ -1,60 +0,0 @@ -import datetime -from typing import Dict, List, Optional - -from pydantic import BaseModel - - -class CreateDockerImageBatchJobBundleResponse(BaseModel): - """ - Response Object for creating a Docker Image Batch Job Bundle - Note: only available for self-hosted mode - """ - - docker_image_batch_job_bundle_id: str - """ID of the Docker Image Batch Job Bundle""" - - -class DockerImageBatchJobBundleResponse(BaseModel): - """ - Response object for a single Docker Image Batch Job Bundle - Note: only available for self-hosted mode - """ - - id: str - """ID of the Docker Image Batch Job Bundle""" - name: str - """Name of the Docker Image Batch Job Bundle""" - created_at: datetime.datetime - """Timestamp of when the Docker Image Batch Job Bundle was created""" - image_repository: str - """Short repository name of the underlying docker image""" - image_tag: str - """Tag of the underlying docker image""" - command: List[str] - """The command to run inside the docker image""" - env: Dict[str, str] - """Environment variables to be injected into the docker image""" - mount_location: Optional[str] = None - """Location of a json-formatted file to mount inside the docker image. - Contents get populated at runtime, and this is the method to change behavior on runtime.""" - cpus: Optional[str] = None - """Default number of cpus to give to the docker image""" - memory: Optional[str] = None - """Default amount of memory to give to the docker image""" - storage: Optional[str] = None - """Default amount of disk to give to the docker image""" - gpus: Optional[int] = None - """Default number of gpus to give to the docker image""" - gpu_type: Optional[str] = None - """Default type of gpu, e.g. 
nvidia-tesla-t4, nvidia-ampere-a10 to give to the docker image""" - - -class ListDockerImageBatchJobBundleResponse(BaseModel): - """ - Response object for listing Docker Image Batch Job Bundles. - Note: only available for self-hosted mode - """ - - docker_image_batch_job_bundles: List[DockerImageBatchJobBundleResponse] - """A list of - [Docker Image Batch Job Bundles](./#launch.docker_image_batch_job_bundle.DockerImageBatchJobBundleResponse).""" diff --git a/launch/errors.py b/launch/errors.py deleted file mode 100644 index 05615755..00000000 --- a/launch/errors.py +++ /dev/null @@ -1,38 +0,0 @@ -import pkg_resources - -api_client_version = pkg_resources.get_distribution("scale-launch").version - -INFRA_FLAKE_MESSAGES = [ - "downstream duration timeout", - "upstream connect error or disconnect/reset before headers. reset reason: local reset", -] - - -class APIError(Exception): - def __init__(self, endpoint, command, requests_response=None, aiohttp_response=None): - message = ( - f"Your client is on version {api_client_version}. If you have not recently " - "done so, please make sure you have updated to the latest version of the " - "client by reinstalling the client.\n " - ) - if requests_response is not None: - message += ( - f"Tried to {command.__name__} {endpoint}, but received {requests_response.status_code}: " - f"{requests_response.reason}." - ) - self.status_code = requests_response.status_code - if hasattr(requests_response, "text"): - if requests_response.text: - message += f"\nThe detailed error is:\n{requests_response.text}" - - if aiohttp_response is not None: - status, reason, data = aiohttp_response - message += f"Tried to {command.__name__} {endpoint}, but received {status}: {reason}." 
- self.status_code = status - if data: - message += f"\nThe detailed error is:\n{data}" - - if any(infra_flake_message in message for infra_flake_message in INFRA_FLAKE_MESSAGES): - message += "\n This likely indicates temporary downtime of the API, please try again " "in a minute or two " - - super().__init__(message) diff --git a/launch/file.py b/launch/file.py deleted file mode 100644 index 16deb75e..00000000 --- a/launch/file.py +++ /dev/null @@ -1,44 +0,0 @@ -from typing import List - -from pydantic import BaseModel, Field - - -class UploadFileResponse(BaseModel): - """Response object for uploading a file.""" - - id: str = Field(..., description="ID of the uploaded file.") - """ID of the uploaded file.""" - - -class GetFileResponse(BaseModel): - """Response object for retrieving a file.""" - - id: str = Field(..., description="ID of the requested file.") - """ID of the requested file.""" - filename: str = Field(..., description="File name.") - """File name.""" - size: int = Field(..., description="Length of the file, in characters.") - """Length of the file, in characters.""" - - -class ListFilesResponse(BaseModel): - """Response object for listing files.""" - - files: List[GetFileResponse] = Field(..., description="List of file IDs, names, and sizes.") - """List of file IDs, names, and sizes.""" - - -class DeleteFileResponse(BaseModel): - """Response object for deleting a file.""" - - deleted: bool = Field(..., description="Whether deletion was successful.") - """Whether deletion was successful.""" - - -class GetFileContentResponse(BaseModel): - """Response object for retrieving a file's content.""" - - id: str = Field(..., description="ID of the requested file.") - """ID of the requested file.""" - content: str = Field(..., description="File content.") - """File content.""" diff --git a/launch/find_packages.py b/launch/find_packages.py deleted file mode 100644 index 2cc06200..00000000 --- a/launch/find_packages.py +++ /dev/null @@ -1,321 +0,0 @@ 
-#!/usr/bin/env python - -# Copyright 2019 Atalaya Tech, Inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import ast -import logging -import os -import pkgutil -import sys -import types -import zipfile -import zipimport -from typing import Dict - -EPP_NO_ERROR = 0 -EPP_PKG_NOT_EXIST = 1 -EPP_PKG_VERSION_MISMATCH = 2 - -ZIPIMPORT_DIR = "zipimports" - -__mm = None - - -logger = logging.getLogger(__name__) - - -def parse_requirement_string(rs): - name, _, version = rs.partition("==") - return name, version - - -def verify_pkg(pkg_req): - global __mm # pylint: disable=global-statement - if __mm is None: - __mm = ModuleManager() - return __mm.verify_pkg(pkg_req) - - -def seek_pip_packages(target_py_file_path): - global __mm # pylint: disable=global-statement - if __mm is None: - __mm = ModuleManager() - return __mm.seek_pip_packages(target_py_file_path) - - -def seek_pip_packages_from_imports(import_set): - global __mm # pylint: disable=global-statement - if __mm is None: - __mm = ModuleManager() - return __mm.seek_in_import_set(import_set) - - -def get_pkg_version(pkg_name): - global __mm # pylint: disable=global-statement - if __mm is None: - __mm = ModuleManager() - return __mm.pip_pkg_map.get(pkg_name, None) - - -def get_zipmodules(): - global __mm # pylint: disable=global-statement - if __mm is None: - __mm = ModuleManager() - return __mm.zip_modules - - -def get_all_pip_installed_modules(): - global __mm # pylint: disable=global-statement - if __mm is None: - __mm 
= ModuleManager() - - installed_modules = list( - # local modules are the ones imported from current directory, either from a - # module.py file or a module directory that contains a `__init__.py` file - filter(lambda m: not m.is_local, __mm.searched_modules.values()) - ) - return list(map(lambda m: m.name, installed_modules)) - - -class ModuleInfo: - def __init__(self, name, path, is_local, is_pkg): - super().__init__() - self.name = name - self.path = path - self.is_local = is_local - self.is_pkg = is_pkg - - -class ModuleManager: - def __init__(self): - super().__init__() - self.pip_pkg_map = {} - self.pip_module_map = {} - self.setuptools_module_set = set() - self.nonlocal_package_path = set() - - import pkg_resources - - # yixu: this populates either self.pip_pkg_map or self.nonlocal_package_path - # pkg_resources.working_set is basically a snapshot of sys.path, i.e. the packages that - # are imported - for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable - module_path = dist.module_path or dist.location - if not module_path: - # Skip if no module path was found for pkg distribution - continue - - if os.path.realpath(module_path) != os.getcwd(): - # add to nonlocal_package path only if it's not current directory - self.nonlocal_package_path.add(module_path) - - self.pip_pkg_map[dist._key] = dist._version - for mn in dist._get_metadata("top_level.txt"): - if dist._key != "setuptools": - self.pip_module_map.setdefault(mn, []).append((dist._key, dist._version)) - else: - self.setuptools_module_set.add(mn) - - # yixu: searched_modules is basically just pkgutil.iter_modules - self.searched_modules = {} - self.zip_modules: Dict[str, zipimport.zipimporter] = {} - for m in pkgutil.iter_modules(): - if m.name not in self.searched_modules: - if isinstance(m.module_finder, zipimport.zipimporter): - print(f"Detected zipimporter {m.module_finder}") - path = m.module_finder.archive - self.zip_modules[path] = m.module_finder - else: - path = 
m.module_finder.path - is_local = self.is_local_path(path) - self.searched_modules[m.name] = ModuleInfo(m.name, path, is_local, m.ispkg) - - def verify_pkg(self, pkg_req): - if pkg_req.name not in self.pip_pkg_map: - # package does not exist in the current python session - return EPP_PKG_NOT_EXIST - - if self.pip_pkg_map[pkg_req.name] not in pkg_req.specifier: - # package version being used in the current python session does not meet - # the specified package version requirement - return EPP_PKG_VERSION_MISMATCH - - return EPP_NO_ERROR - - def seek_pip_packages(self, target_py_file_path): - print("target py file path: %s", target_py_file_path) - work = DepSeekWork(self) - work.do(target_py_file_path) - requirements = {} - for _, pkg_info_list in work.dependencies.items(): - for pkg_name, pkg_version in pkg_info_list: - requirements[pkg_name] = pkg_version - - return requirements, work.unknown_module_set - - def seek_in_import_set(self, import_set): - work = DepSeekWork(self) - work.do_import_set(import_set) - requirements = {} - for _, pkg_info_list in work.dependencies.items(): - for pkg_name, pkg_version in pkg_info_list: - requirements[pkg_name] = pkg_version - - return requirements, work.unknown_module_set - - def is_local_path(self, path): - if path in self.nonlocal_package_path: - return False - - dir_name = os.path.split(path)[1] - # pylint: disable=too-many-boolean-expressions - if ( - "site-packages" in path - or "anaconda" in path - or path.endswith("packages") - or dir_name == "bin" - or dir_name.startswith("lib") - or dir_name.startswith("python") - or dir_name.startswith("plat") - ): - self.nonlocal_package_path.add(path) - return False - - return True - - -class DepSeekWork: - def __init__(self, module_manager): - super().__init__() - self.module_manager = module_manager - - self.dependencies = {} - self.unknown_module_set = set() - self.parsed_module_set = set() - - def do(self, target_py_file_path): - self.seek_in_file(target_py_file_path) - - def 
do_import_set(self, import_set): - self.seek_in_import_set(import_set) - - def seek_in_file(self, file_path): - try: - with open(file_path) as f: # pylint: disable=unspecified-encoding - content = f.read() - except UnicodeDecodeError: - with open(file_path, encoding="utf-8") as f: - content = f.read() - self.seek_in_source(content) - - def seek_in_import_set(self, import_set): - for module_name in import_set: - if module_name == "launch": - continue - if module_name in self.parsed_module_set: - continue - self.parsed_module_set.add(module_name) - - if module_name in self.module_manager.searched_modules: - m = self.module_manager.searched_modules[module_name] - if m.is_local: - # Recursively search dependencies in sub-modules - if m.path in self.module_manager.zip_modules: - self.seek_in_zip(m.path) - elif m.is_pkg: - self.seek_in_dir(os.path.join(m.path, m.name)) - else: - self.seek_in_file(os.path.join(m.path, f"{m.name}.py")) - else: - # check if the package has already been added to the list - if ( - module_name in self.module_manager.pip_module_map - and module_name not in self.dependencies - and module_name not in self.module_manager.setuptools_module_set - ): - self.dependencies[module_name] = self.module_manager.pip_module_map[module_name] - else: - if module_name in self.module_manager.pip_module_map: - if module_name not in self.dependencies: - # In some special cases, the pip-installed module can not - # be located in the searched_modules - self.dependencies[module_name] = self.module_manager.pip_module_map[module_name] - else: - if module_name not in sys.builtin_module_names: - self.unknown_module_set.add(module_name) - - def seek_in_source(self, content): - # Extract all dependency modules by searching through the trees of the Python - # abstract syntax grammar with Python's built-in ast module - tree = ast.parse(content) - import_set = set() - for node in ast.walk(tree): - if isinstance(node, ast.Import): - for name in node.names: - 
import_set.add(name.name.partition(".")[0]) - elif isinstance(node, ast.ImportFrom): - if node.module is not None and node.level == 0: - import_set.add(node.module.partition(".")[0]) - self.seek_in_import_set(import_set) - - def seek_in_dir(self, dir_path): - for path, dir_list, file_list in os.walk(dir_path): - for file_name in file_list: - if not file_name.endswith(".py"): - continue - self.seek_in_file(os.path.join(path, file_name)) - for dir_name in dir_list: - if dir_name == "__pycache__": - continue - self.seek_in_dir(os.path.join(path, dir_name)) - - def seek_in_zip(self, zip_path): - with zipfile.ZipFile(zip_path) as zf: - for module_path in zf.infolist(): - filename = module_path.filename - if filename.endswith(".py"): - logger.debug("Seeking modules in zip %s", filename) - content = self.module_manager.zip_modules[zip_path].get_source(filename.replace(".py", "")) - self.seek_in_source(content) - - -def find_packages_from_path(path: str): - """ - Call this on another python file. - """ - reqs, _ = seek_pip_packages(path) - return reqs - - -def find_packages_from_imports(globals_copy): - """ - Call this from a python notebook to get the current notebook's packages. - """ - modules = get_imports(globals_copy) - # Extract names of the modules - import_names = set(map(lambda m: m.__name__, modules)) - reqs, _ = seek_pip_packages_from_imports(import_names) - return reqs - - -def get_imports(globals_copy): - """ - Get a list of imported modules from `globals_copy`. 
- """ - modules = [] - for _, val in globals_copy.items(): - if isinstance(val, types.ModuleType): - modules.append(val) - return modules diff --git a/launch/fine_tune.py b/launch/fine_tune.py deleted file mode 100644 index 3f220ea1..00000000 --- a/launch/fine_tune.py +++ /dev/null @@ -1,51 +0,0 @@ -from enum import Enum -from typing import List, Optional - -from pydantic import BaseModel - - -class BatchJobStatus(str, Enum): - PENDING = "PENDING" - RUNNING = "RUNNING" - SUCCESS = "SUCCESS" - FAILURE = "FAILURE" - CANCELLED = "CANCELLED" - UNDEFINED = "UNDEFINED" - TIMEOUT = "TIMEOUT" - - -class LLMFineTuneEvent(BaseModel): - timestamp: Optional[float] = None - message: str - level: str - - -class CreateFineTuneResponse(BaseModel): - id: str - """ID of the created fine-tuning job""" - - -class GetFineTuneResponse(BaseModel): - id: str - """ID of the requested job""" - fine_tuned_model: Optional[str] = None - """ - Name of the resulting fine-tuned model. This can be plugged into the - Completion API ones the fine-tune is complete - """ - status: BatchJobStatus - """Status of the requested job""" - - -class ListFineTunesResponse(BaseModel): - jobs: List[GetFineTuneResponse] - """List of fine-tuning jobs and their statuses""" - - -class CancelFineTuneResponse(BaseModel): - success: bool - """Whether cancellation was successful""" - - -class GetFineTuneEventsResponse(BaseModel): - events: List[LLMFineTuneEvent] diff --git a/launch/hooks.py b/launch/hooks.py deleted file mode 100644 index 6feedae5..00000000 --- a/launch/hooks.py +++ /dev/null @@ -1,13 +0,0 @@ -from enum import Enum - - -class PostInferenceHooks(str, Enum): - """ - Post-inference hooks are functions that are called after inference is complete. - - Attributes: - CALLBACK: The callback hook is called with the inference response and the task ID. 
- """ - - # INSIGHT = "insight" - CALLBACK: str = "callback" diff --git a/launch/logger.py b/launch/logger.py deleted file mode 100644 index 7299d5a9..00000000 --- a/launch/logger.py +++ /dev/null @@ -1,13 +0,0 @@ -import logging -import typing - -import requests - -logger = logging.getLogger(__name__) -logging.basicConfig() - -if typing.TYPE_CHECKING: - # Ignore the following code because `requests.packages` does not pass mypy - pass -else: - logging.getLogger(requests.packages.urllib3.__package__).setLevel(logging.ERROR) # pylint: disable=no-member diff --git a/launch/make_batch_file.py b/launch/make_batch_file.py deleted file mode 100644 index ee3690ed..00000000 --- a/launch/make_batch_file.py +++ /dev/null @@ -1,24 +0,0 @@ -import base64 -import csv -import json -from typing import IO, Any, Dict, List - - -def make_batch_input_file(urls: List[str], file: IO[str]): - writer = csv.DictWriter(file, fieldnames=["id", "url"]) - writer.writeheader() - for i, url in enumerate(urls): - writer.writerow({"id": i, "url": url}) - - -def make_batch_input_dict_file(inputs: List[Dict[str, Any]], file: IO[str]): - writer = csv.DictWriter(file, fieldnames=["id", "args"]) - writer.writeheader() - for i, args in enumerate(inputs): - args_encoded = base64.b64encode(json.dumps(args).encode("utf-8")).decode("utf-8") - writer.writerow( - { - "id": i, - "args": args_encoded, - } - ) diff --git a/launch/model.py b/launch/model.py deleted file mode 100644 index 3cf0517e..00000000 --- a/launch/model.py +++ /dev/null @@ -1,19 +0,0 @@ -from typing import Dict - -from pydantic import BaseModel, Field - - -class ModelDownloadRequest(BaseModel): - """Request object for downloading a model.""" - - model_name: str = Field(..., description="Model name.") - """Model name.""" - download_format: str = Field(..., description="Download format.") - """Desired download format (default=huggingface).""" - - -class ModelDownloadResponse(BaseModel): - """Response object for downloading a model.""" - - 
urls: Dict[str, str] = Field(..., description="Dictionary of model file name, model download url pairs.") - """Model download urls.""" diff --git a/launch/model_bundle.py b/launch/model_bundle.py deleted file mode 100644 index 47ecf7c6..00000000 --- a/launch/model_bundle.py +++ /dev/null @@ -1,270 +0,0 @@ -import datetime -from abc import ABC -from dataclasses import dataclass -from enum import Enum -from typing import Any, Dict, List, Optional, Union - -from dataclasses_json import Undefined, dataclass_json -from pydantic import BaseModel, Field -from typing_extensions import Literal - -# TODO(yi): These docstrings are currently perfunctory. I'm not sure we even want to expose most of these -# fields. We need to overhaul our types :sadge: - - -class ModelBundleFrameworkType(str, Enum): - PYTORCH = "pytorch" - TENSORFLOW = "tensorflow" - CUSTOM = "custom_base_image" - - -class PytorchFramework(BaseModel): - framework_type: Literal[ModelBundleFrameworkType.PYTORCH] - - pytorch_image_tag: str - """Image tag of the Pytorch image to use.""" - - -class TensorflowFramework(BaseModel): - framework_type: Literal[ModelBundleFrameworkType.TENSORFLOW] - - tensorflow_version: str - """Tensorflow version to use.""" - - -class CustomFramework(BaseModel): - framework_type: Literal[ModelBundleFrameworkType.CUSTOM] - - image_repository: str - """Docker image repository to use as the base image.""" - - image_tag: str - """Docker image tag to use as the base image.""" - - -class ModelBundleFlavorType(str, Enum): - CLOUDPICKLE_ARTIFACT = "cloudpickle_artifact" - ZIP_ARTIFACT = "zip_artifact" - RUNNABLE_IMAGE = "runnable_image" - STREAMING_ENHANCED_RUNNABLE_IMAGE = "streaming_enhanced_runnable_image" - TRITON_ENHANCED_RUNNABLE_IMAGE = "triton_enhanced_runnable_image" - - -class CloudpickleArtifactFlavor(BaseModel): - flavor: Literal[ModelBundleFlavorType.CLOUDPICKLE_ARTIFACT] - - requirements: List[str] - """List of requirements to install in the environment before running the 
model.""" - - framework: Union[PytorchFramework, TensorflowFramework, CustomFramework] = Field( - ..., discriminator="framework_type" - ) - """ - Machine Learning framework specification. Either - [`PytorchFramework`](./#launch.model_bundle.PytorchFramework), - [`TensorflowFramework`](./#launch.model_bundle.TensorflowFramework), or - [`CustomFramework`](./#launch.model_bundle.CustomFramework). - """ - - app_config: Optional[Dict[str, Any]] - """Optional configuration for the application.""" - - location: str - - load_predict_fn: str - """Function which, when called, returns the prediction function.""" - - load_model_fn: str - """Function which, when called, returns the model object.""" - - -class ZipArtifactFlavor(BaseModel): - flavor: Literal[ModelBundleFlavorType.ZIP_ARTIFACT] - - requirements: List[str] - """List of requirements to install in the environment before running the model.""" - - framework: Union[PytorchFramework, TensorflowFramework, CustomFramework] = Field( - ..., discriminator="framework_type" - ) - """ - Machine Learning framework specification. Either - [`PytorchFramework`](./#launch.model_bundle.PytorchFramework), - [`TensorflowFramework`](./#launch.model_bundle.TensorflowFramework), or - [`CustomFramework`](./#launch.model_bundle.CustomFramework). - """ - - app_config: Optional[Dict[str, Any]] = None - """Optional configuration for the application.""" - - location: str - - load_predict_fn_module_path: str - """Path to the module to load the prediction function.""" - - load_model_fn_module_path: str - """Path to the module to load the model object.""" - - -class RunnableImageLike(BaseModel, ABC): - """An abstract base for flavors that are related to bundles defined by runnable images.""" - - repository: str - tag: str - command: List[str] - env: Optional[Dict[str, str]] = None - protocol: Literal["http"] # TODO: add support for other protocols (e.g. 
grpc) - readiness_initial_delay_seconds: int = 120 - - -class RunnableImageFlavor(RunnableImageLike): - """Model bundles that use custom docker images that expose an HTTP server for inference.""" - - flavor: Literal[ModelBundleFlavorType.RUNNABLE_IMAGE] - - -class StreamingEnhancedRunnableImageFlavor(RunnableImageLike): - """For deployments that expose a streaming route in a container.""" - - flavor: Literal[ModelBundleFlavorType.STREAMING_ENHANCED_RUNNABLE_IMAGE] - streaming_command: List[str] - - -class TritonEnhancedRunnableImageFlavor(RunnableImageLike): - """For runnable image models that require tritonserver running in a container.""" - - flavor: Literal[ModelBundleFlavorType.TRITON_ENHANCED_RUNNABLE_IMAGE] - - triton_model_repository: str - - triton_model_replicas: Optional[Dict[str, str]] = None - - triton_num_cpu: float - - triton_commit_tag: str - - triton_storage: Optional[str] = None - - triton_memory: Optional[str] = None - - triton_readiness_initial_delay_seconds: int = 300 - - -ModelBundleFlavors = Union[ - CloudpickleArtifactFlavor, - ZipArtifactFlavor, - RunnableImageFlavor, - StreamingEnhancedRunnableImageFlavor, - TritonEnhancedRunnableImageFlavor, -] -"""Union type exhaustively representing all valid model bundle flavors. - -Valid model bundle flavors are: -- [`CloudpickleArtifactFlavor`](./#launch.model_bundle.CloudpickleArtifactFlavor) -- [`ZipArtifactFlavor`](./#launch.model_bundle.ZipArtifactFlavor) -- [`RunnableImageFlavor`](./#launch.model_bundle.RunnableImageFlavor) -- [`StreamingEnhancedRunnableImageFlavor`](./#launch.model_bundle.StreamingEnhancedRunnableImageFlavor) -- [`TritonEnhancedRunnableImageFlavor`](./#launch.model_bundle.TritonEnhancedRunnableImageFlavor) -""" - - -class CreateModelBundleV2Response(BaseModel): - """ - Response object for creating a Model Bundle. - """ - - model_bundle_id: str - """ID of the Model Bundle.""" - - -class ModelBundleV2Response(BaseModel): - """ - Response object for a single Model Bundle. 
- """ - - id: str - """ID of the Model Bundle.""" - - name: str - """Name of the Model Bundle.""" - - metadata: Dict[str, Any] - """Metadata associated with the Model Bundle.""" - - created_at: datetime.datetime - """Timestamp of when the Model Bundle was created.""" - - model_artifact_ids: List[str] - """IDs of the Model Artifacts associated with the Model Bundle.""" - - schema_location: Optional[str] = None - - flavor: ModelBundleFlavors = Field(..., discriminator="flavor") - """Flavor of the Model Bundle, representing how the model bundle was packaged. - - See [`ModelBundleFlavors`](./#launch_api.model_bundle.ModelBundleFlavors) for details. - """ - - -class ListModelBundlesV2Response(BaseModel): - """ - Response object for listing Model Bundles. - """ - - model_bundles: List[ModelBundleV2Response] - """A list of [Model Bundles](./#launch.model_bundle.ModelBundleV2Response).""" - - -@dataclass_json(undefined=Undefined.EXCLUDE) -@dataclass -class ModelBundle: - """ - Represents a ModelBundle. - """ - - name: str - """ - The name of the bundle. Must be unique across all bundles that the user owns. - """ - - id: Optional[str] = None - """ - A globally unique identifier for the bundle. - """ - - env_params: Optional[Dict[str, str]] = None - """ - A dictionary that dictates environment information. See LaunchClient.create_model_bundle - for more information. - """ - - location: Optional[str] = None - """ - An opaque location for the bundle. - """ - - metadata: Optional[Dict[Any, Any]] = None - """ - Arbitrary metadata for the bundle. - """ - - packaging_type: Optional[str] = None - """ - The packaging type for the bundle. Can be ``cloudpickle`` or ``zip``. - """ - - requirements: Optional[List[str]] = None - """ - A list of Python package requirements for the bundle. See LaunchClient.create_model_bundle - for more information. - """ - - app_config: Optional[Dict[Any, Any]] = None - """ - An optional user-specified configuration mapping for the bundle. 
- """ - - created_at: Optional[str] = None - - def __str__(self): - return f"ModelBundle(bundle_name={self.name})" diff --git a/launch/model_endpoint.py b/launch/model_endpoint.py deleted file mode 100644 index b9f1585b..00000000 --- a/launch/model_endpoint.py +++ /dev/null @@ -1,684 +0,0 @@ -import concurrent.futures -import json -import time -import uuid -from abc import ABC, abstractmethod -from collections import Counter -from dataclasses import dataclass -from typing import Dict, Iterator, List, Optional, Sequence - -import sseclient -from dataclasses_json import Undefined, dataclass_json -from deprecation import deprecated -from typing_extensions import Literal - -from launch.api_client import ApiClient -from launch.api_client.apis.tags.default_api import DefaultApi -from launch.request_validation import validate_task_request - -TASK_PENDING_STATE = "PENDING" -TASK_SUCCESS_STATE = "SUCCESS" -TASK_FAILURE_STATE = "FAILURE" - -# Echoes fields in EndpointResponse class -ALLOWED_ENDPOINT_RESPONSE_FIELDS = {"status", "result_url", "result", "traceback", "status_code"} - - -@dataclass_json(undefined=Undefined.EXCLUDE) -@dataclass -class ModelEndpoint: - """ - Represents an Endpoint from the database. - """ - - name: str - """ - The name of the endpoint. Must be unique across all endpoints owned by the user. - """ - - id: Optional[str] = None - """ - A globally unique identifier for the endpoint. - """ - - bundle_name: Optional[str] = None - """ - The name of the bundle for the endpoint. The owner of the bundle must be the same as the owner for the endpoint. - """ - - status: Optional[str] = None - """ - The status of the endpoint. - """ - - resource_state: Optional[dict] = None - """ - Resource state for the endpoint. - """ - - deployment_state: Optional[dict] = None - """ - Deployment state for the endpoint. - """ - - metadata: Optional[dict] = None - """ - Metadata for the endpoint. - """ - - endpoint_type: Optional[str] = None - """ - The type of the endpoint. 
Must be ``'async'`` or ``'sync'``. - """ - - configs: Optional[dict] = None - """ - Config for the endpoint. - """ - - destination: Optional[str] = None - """ - Queue identifier for endpoint, use only for debugging. - """ - - post_inference_hooks: Optional[List[str]] = None - """ - List of post inference hooks for the endpoint. - """ - - default_callback_url: Optional[str] = None - """ - Default callback url for the endpoint. - """ - - def __repr__(self): - return ( - f"ModelEndpoint(name='{self.name}', bundle_name='{self.bundle_name}', " - f"status='{self.status}', resource_state='{json.dumps(self.resource_state)}', " - f"deployment_state='{json.dumps(self.deployment_state)}', " - f"endpoint_type='{self.endpoint_type}', metadata='{self.metadata}')" - ) - - -class EndpointRequest: - """ - Represents a single request to either a ``SyncEndpoint``, ``StreamingEndpoint``, or ``AsyncEndpoint``. - - Parameters: - url: A url to some file that can be read in to a ModelBundle's predict function. Can be an image, raw text, etc. - **Note**: the contents of the file located at ``url`` are opened as a sequence of ``bytes`` and passed - to the predict function. If you instead want to pass the url itself as an input to the predict function, - see ``args``. - - Exactly one of ``url`` and ``args`` must be specified. - - args: A Dictionary with arguments to a ModelBundle's predict function. If the predict function has signature - ``predict_fn(foo, bar)``, then the keys in the dictionary should be ``"foo"`` and ``"bar"``. - Values must be native Python objects. - - Exactly one of ``url`` and ``args`` must be specified. - - return_pickled: Whether the output should be a pickled python object, or directly returned serialized json. - - callback_url: The callback url to use for this task. If None, then the - default_callback_url of the endpoint is used. The endpoint must specify - "callback" as a post-inference hook for the callback to be triggered. 
- - callback_auth_kind: The default callback auth kind to use for async endpoints. - Either "basic" or "mtls". This can be overridden in the task parameters for each - individual task. - - callback_auth_username: The default callback auth username to use. This only - applies if callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - callback_auth_password: The default callback auth password to use. This only - applies if callback_auth_kind is "basic". This can be overridden in the task - parameters for each individual task. - - callback_auth_cert: The default callback auth cert to use. This only applies - if callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. - - callback_auth_key: The default callback auth key to use. This only applies - if callback_auth_kind is "mtls". This can be overridden in the task - parameters for each individual task. - - request_id: (deprecated) A user-specifiable id for requests. - Should be unique among EndpointRequests made in the same batch call. - If one isn't provided the client will generate its own. - - extra_headers: An optional dictionary which is passed on to the model endpoint - as extra HTTP headers. - """ - - def __init__( - self, - url: Optional[str] = None, - args: Optional[Dict] = None, - callback_url: Optional[str] = None, - callback_auth_kind: Optional[Literal["basic", "mtls"]] = None, - callback_auth_username: Optional[str] = None, - callback_auth_password: Optional[str] = None, - callback_auth_cert: Optional[str] = None, - callback_auth_key: Optional[str] = None, - return_pickled: Optional[bool] = False, - request_id: Optional[str] = None, - extra_headers: Optional[Dict[str, str]] = None, - ): - # TODO: request_id is pretty much here only to support the clientside AsyncEndpointBatchResponse - # so it should be removed when we get proper batch endpoints working. 
- validate_task_request(url=url, args=args) - if request_id is None: - request_id = str(uuid.uuid4()) - self.url = url - self.args = args - self.callback_url = callback_url - self.callback_auth_kind = callback_auth_kind - self.callback_auth_username = callback_auth_username - self.callback_auth_password = callback_auth_password - self.callback_auth_cert = callback_auth_cert - self.callback_auth_key = callback_auth_key - self.return_pickled = return_pickled - self.request_id: str = request_id - self.extra_headers = extra_headers - - -class EndpointResponse: - """ - Represents a response received from a Endpoint. - - """ - - def __init__( - self, - client, - status: str, - result_url: Optional[str] = None, - result: Optional[str] = None, - traceback: Optional[str] = None, - status_code: Optional[int] = None, - ): - """ - Parameters: - client: An instance of ``LaunchClient``. - - status: A string representing the status of the request, i.e. ``SUCCESS``, ``FAILURE``, or ``PENDING`` - - result_url: A string that is a url containing the pickled python object from the - Endpoint's predict function. - - Exactly one of ``result_url`` or ``result`` will be populated, - depending on the value of ``return_pickled`` in the request. - - result: A string that is the serialized return value (in json form) of the Endpoint's predict function. - Specifically, one can ``json.loads()`` the value of result to get the original python object back. - - Exactly one of ``result_url`` or ``result`` will be populated, - depending on the value of ``return_pickled`` in the request. - - traceback: The stack trace if the inference endpoint raised an error. Can be used for debugging - - status_code: The underlying status code of the response, given from the inference endpoint itself. 
- - """ - self.client = client - self.status = status - self.result_url = result_url - self.result = result - self.traceback = traceback - self.status_code = status_code - - def __str__(self) -> str: - return ( - f"status: {self.status}, result: {self.result}, result_url: {self.result_url}, " - f"traceback: {self.traceback}" - ) - - -class EndpointResponseFuture: - """ - Represents a future response from an Endpoint. Specifically, when the ``EndpointResponseFuture`` is ready, - then its ``get`` method will return an actual instance of ``EndpointResponse``. - - This object should not be directly instantiated by the user. - """ - - def __init__(self, client, endpoint_name: str, async_task_id: str): - """ - Parameters: - client: An instance of ``LaunchClient``. - - endpoint_name: The name of the endpoint. - - async_task_id: An async task id. - """ - self.client = client - self.endpoint_name = endpoint_name - self.async_task_id = async_task_id - - def get(self, timeout: Optional[float] = None) -> EndpointResponse: - """ - Retrieves the ``EndpointResponse`` for the prediction request after it completes. This method blocks. - - Parameters: - timeout: The maximum number of seconds to wait for the response. If None, then - the method will block indefinitely until the response is ready. 
- """ - if timeout is not None and timeout <= 0: - raise ValueError("Timeout must be greater than 0.") - start_time = time.time() - while timeout is None or time.time() - start_time < timeout: - async_response = self.client._get_async_endpoint_response( # pylint: disable=W0212 - self.endpoint_name, self.async_task_id - ) - status = async_response["status"] - if status in ["PENDING", "STARTED"]: - time.sleep(2) - else: - if status == "SUCCESS": - return EndpointResponse( - client=self.client, - status=status, - result_url=async_response.get("result", {}).get("result_url", None), - result=async_response.get("result", {}).get("result", None), - traceback=None, - status_code=async_response.get("status_code", None), - ) - elif status == "FAILURE": - return EndpointResponse( - client=self.client, - status=status, - result_url=None, - result=None, - traceback=async_response.get("traceback", None), - status_code=async_response.get("status_code", None), - ) - else: - raise ValueError(f"Unrecognized status: {async_response['status']}") - raise TimeoutError - - -class EndpointResponseStream(Iterator): - """ - Represents a stream response from an Endpoint. This object is iterable and yields - ``EndpointResponse`` objects. - - This object should not be directly instantiated by the user. 
- """ - - def __init__(self, response): - self.sse_client = sseclient.SSEClient(response) - self.events = self.sse_client.events() - - def __iter__(self): - """Uses server-sent events to iterate through the stream.""" - return self - - def __next__(self): - """Uses server-sent events to iterate through the stream.""" - event = self.events.__next__() - data = json.loads(event.data) - result = data.get("result", {}) or {} - return EndpointResponse( - client=None, - status=data["status"], - result_url=result.get("result_url", None), - result=result.get("result", None), - traceback=data.get("traceback"), - status_code=data.get("status_code", None), - ) - - -class Endpoint(ABC): - """An abstract class that represent any kind of endpoints in Scale Launch""" - - def __init__(self, model_endpoint: ModelEndpoint, client): - self.model_endpoint = model_endpoint - self.client = client - - def _update_model_endpoint_view(self): - with ApiClient(self.client.configuration) as api_client: - api_instance = DefaultApi(api_client) - query_params = {"name": self.model_endpoint.name} - response = api_instance.list_model_endpoints_v1_model_endpoints_get( - query_params=query_params, - skip_deserialization=True, - ) - resp = json.loads(response.response.data) - if len(resp["model_endpoints"]) == 0: - raise ValueError(f"Could not update model endpoint view for endpoint {self.model_endpoint.name}") - resp = resp["model_endpoints"][0] - self.model_endpoint = ModelEndpoint.from_dict(resp) - - def status(self) -> Optional[str]: - """Gets the status of the Endpoint.""" - self._update_model_endpoint_view() - return self.model_endpoint.status - - def resource_state(self) -> Optional[dict]: - """Gets the resource state of the Endpoint.""" - self._update_model_endpoint_view() - return self.model_endpoint.resource_state - - def deployment_state(self) -> Optional[dict]: - """Gets the worker settings of the Endpoint.""" - self._update_model_endpoint_view() - return 
self.model_endpoint.deployment_state - - @abstractmethod - def predict(self, request: EndpointRequest): - """Runs a prediction request.""" - - -class SyncEndpoint(Endpoint): - """ - A synchronous model endpoint. - """ - - def __init__(self, model_endpoint: ModelEndpoint, client): - """ - Parameters: - model_endpoint: ModelEndpoint object. - - client: A LaunchClient object - """ - super().__init__(model_endpoint=model_endpoint, client=client) - - def __str__(self): - return f"SyncEndpoint " - - def __repr__(self): - return ( - f"SyncEndpoint(name='{self.model_endpoint.name}', " - f"bundle_name='{self.model_endpoint.bundle_name}', " - f"status='{self.model_endpoint.status}', " - f"resource_state='{json.dumps(self.model_endpoint.resource_state)}', " - f"deployment_state='{json.dumps(self.model_endpoint.deployment_state)}', " - f"endpoint_type='{self.model_endpoint.endpoint_type}', " - f"metadata='{self.model_endpoint.metadata}')" - ) - - def predict(self, request: EndpointRequest) -> EndpointResponse: - """ - Runs a synchronous prediction request. - - Parameters: - request: The ``EndpointRequest`` object that contains the payload. - """ - raw_response = self.client._sync_request( # pylint: disable=W0212 - self.model_endpoint.name, - url=request.url, - args=request.args, - return_pickled=request.return_pickled, - extra_headers=request.extra_headers, - ) - - raw_response = { - k: v for k, v in raw_response.items() if v is not None and k in ALLOWED_ENDPOINT_RESPONSE_FIELDS - } - return EndpointResponse(client=self.client, **raw_response) - - -class StreamingEndpoint(Endpoint): - """ - A synchronous model endpoint. - """ - - def __init__(self, model_endpoint: ModelEndpoint, client): - """ - Parameters: - model_endpoint: ModelEndpoint object. 
- - client: A LaunchClient object - """ - super().__init__(model_endpoint=model_endpoint, client=client) - - def __str__(self): - return f"StreamingEndpoint " - - def __repr__(self): - return ( - f"StreamingEndpoint(name='{self.model_endpoint.name}', " - f"bundle_name='{self.model_endpoint.bundle_name}', " - f"status='{self.model_endpoint.status}', " - f"resource_state='{json.dumps(self.model_endpoint.resource_state)}', " - f"deployment_state='{json.dumps(self.model_endpoint.deployment_state)}', " - f"endpoint_type='{self.model_endpoint.endpoint_type}', " - f"metadata='{self.model_endpoint.metadata}')" - ) - - def predict(self, request: EndpointRequest) -> EndpointResponseStream: - """ - Runs a streaming prediction request. - - Parameters: - request: The ``EndpointRequest`` object that contains the payload. - - Returns: - An ``EndpointResponseStream`` object that can be used to iterate through the stream. - """ - raw_response = self.client._streaming_request( # pylint: disable=W0212 - self.model_endpoint.name, - url=request.url, - args=request.args, - return_pickled=request.return_pickled, - extra_headers=request.extra_headers, - ) - return EndpointResponseStream(response=raw_response) - - -class AsyncEndpoint(Endpoint): - """ - An asynchronous model endpoint. - """ - - def __init__(self, model_endpoint: ModelEndpoint, client): - """ - Parameters: - model_endpoint: ModelEndpoint object. 
- - client: A LaunchClient object - """ - super().__init__(model_endpoint=model_endpoint, client=client) - - def __str__(self): - return f"AsyncEndpoint " - - def __repr__(self): - return ( - f"AsyncEndpoint(name='{self.model_endpoint.name}', " - f"bundle_name='{self.model_endpoint.bundle_name}', " - f"status='{self.model_endpoint.status}', " - f"resource_state='{json.dumps(self.model_endpoint.resource_state)}', " - f"deployment_state='{json.dumps(self.model_endpoint.deployment_state)}', " - f"endpoint_type='{self.model_endpoint.endpoint_type}', " - f"metadata='{self.model_endpoint.metadata}')" - ) - - def predict(self, request: EndpointRequest) -> EndpointResponseFuture: - """ - Runs an asynchronous prediction request. - - Parameters: - request: The ``EndpointRequest`` object that contains the payload. - - Returns: - An ``EndpointResponseFuture`` such the user can use to query the status of the request. - Example: - - .. code-block:: python - - my_endpoint = AsyncEndpoint(...) - f: EndpointResponseFuture = my_endpoint.predict(EndpointRequest(...)) - result = f.get() # blocks on completion - """ - response = self.client._async_request( # pylint: disable=W0212 - self.model_endpoint.name, - url=request.url, - args=request.args, - callback_url=request.callback_url, - callback_auth_kind=request.callback_auth_kind, - callback_auth_username=request.callback_auth_username, - callback_auth_password=request.callback_auth_password, - callback_auth_cert=request.callback_auth_cert, - callback_auth_key=request.callback_auth_key, - return_pickled=request.return_pickled, - extra_headers=request.extra_headers, - ) - async_task_id = response["task_id"] - return EndpointResponseFuture( - client=self.client, - endpoint_name=self.model_endpoint.name, - async_task_id=async_task_id, - ) - - @deprecated - def predict_batch(self, requests: Sequence[EndpointRequest]) -> "AsyncEndpointBatchResponse": - """ - (deprecated) - Runs inference on the data items specified by urls. 
Returns a AsyncEndpointResponse. - - Parameters: - requests: List of EndpointRequests. Request_ids must all be distinct. - - Returns: - an AsyncEndpointResponse keeping track of the inference requests made - """ - # Make inference requests to the endpoint, - # if batches are possible make this aware you can pass batches - # TODO add batch support once those are out - - if len(requests) != len(set(request.request_id for request in requests)): - raise ValueError("Request_ids in a batch must be unique") - - def single_request(request): - # request has keys url and args - - inner_inference_request = self.client._async_request( # pylint: disable=W0212 - endpoint_name=self.model_endpoint.name, - url=request.url, - args=request.args, - callback_url=request.callback_url, - callback_auth_kind=request.callback_auth_kind, - callback_auth_username=request.callback_auth_username, - callback_auth_password=request.callback_auth_password, - callback_auth_cert=request.callback_auth_cert, - callback_auth_key=request.callback_auth_key, - return_pickled=request.return_pickled, - ) - request_key = request.request_id - return request_key, inner_inference_request - - with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor: - urls_to_requests = executor.map(single_request, requests) - request_ids = dict(urls_to_requests) - - return AsyncEndpointBatchResponse( - self.client, - endpoint_name=self.model_endpoint.name, - request_ids=request_ids, - ) - - -@deprecated -class AsyncEndpointBatchResponse: - """ - (deprecated) - - Currently represents a list of async inference requests to a specific endpoint. Keeps track of the requests made, - and gives a way to poll for their status. - - Invariant: set keys for self.request_ids and self.responses are equal - - idk about this abstraction tbh, could use a redesign maybe? 
- - Also batch inference sort of removes the need for much of the complication in here - - """ - - def __init__( - self, - client, - endpoint_name: str, - request_ids: Dict[str, str], - ): - self.client = client - self.endpoint_name = endpoint_name - self.request_ids = request_ids.copy() # custom request_id (clientside) -> task_id (serverside) - self.responses: Dict[str, Optional[EndpointResponse]] = {req_id: None for req_id in request_ids.keys()} - # celery task statuses - self.statuses: Dict[str, Optional[str]] = {req_id: TASK_PENDING_STATE for req_id in request_ids.keys()} - - def poll_endpoints(self): - """ - Runs one round of polling the endpoint for async task results. - """ - - # TODO: replace with batch endpoint, or make requests in parallel - # TODO: Make this private. - - def single_request(inner_url, inner_task_id): - if self.statuses[inner_url] != TASK_PENDING_STATE: - # Skip polling tasks that are completed - return None - inner_response = self.client._get_async_endpoint_response( # pylint: disable=W0212 - self.endpoint_name, inner_task_id - ) - print("inner response", inner_response) - return ( - inner_url, - inner_task_id, - inner_response.get("status", None), - inner_response, - ) - - with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor: - responses = executor.map( - single_request, - self.request_ids.keys(), - self.request_ids.values(), - ) - - for response in responses: - if response is None: - continue - url, _, status, raw_response = response - if status: - self.statuses[url] = status - if raw_response: - response_object = EndpointResponse( - client=self.client, - status=raw_response["status"], - result_url=raw_response.get("result_url", None), - result=raw_response.get("result", None), - traceback=raw_response.get("traceback", None), - status_code=raw_response.get("status_code", None), - ) - self.responses[url] = response_object - - def is_done(self, poll=True) -> bool: - """ - Checks the client local status to see if all 
requests are done. - - Parameters: - poll: If ``True``, then this will first check the status for a subset - of the remaining incomplete tasks on the Launch server. - """ - # TODO: make some request to some endpoint - if poll: - self.poll_endpoints() - return all(resp != TASK_PENDING_STATE for resp in self.statuses.values()) - - def get_responses(self) -> Dict[str, Optional[EndpointResponse]]: - """ - Returns a dictionary, where each key is the request_id for an EndpointRequest passed in, and the corresponding - object at that key is the corresponding EndpointResponse. - """ - if not self.is_done(poll=False): - raise ValueError("Not all responses are done") - return self.responses.copy() - - def batch_status(self): - counter = Counter(self.statuses.values()) - return dict(counter) diff --git a/launch/pydantic_schemas.py b/launch/pydantic_schemas.py deleted file mode 100644 index d69f8299..00000000 --- a/launch/pydantic_schemas.py +++ /dev/null @@ -1,66 +0,0 @@ -from enum import Enum -from typing import Any, Callable, Dict, Set, Type, Union - -import pydantic -from pydantic import BaseModel - -PYDANTIC_V2 = hasattr(pydantic, "VERSION") and pydantic.VERSION.startswith("2.") - -if not PYDANTIC_V2: - from pydantic.schema import ( # pylint: disable=no-name-in-module - get_flat_models_from_models, - model_process_schema, - ) - - -REF_PREFIX = "#/components/schemas/" - - -def get_model_definitions_v1(request_schema: Type[BaseModel], response_schema: Type[BaseModel]) -> Dict[str, Any]: - """ - Gets the model schemas in jsonschema format from a sequence of Pydantic BaseModels. 
- """ - flat_models = get_flat_models_from_models([request_schema, response_schema]) - model_name_map = {model: model.__name__ for model in flat_models} - model_name_map.update({request_schema: "RequestSchema", response_schema: "ResponseSchema"}) - return get_model_definitions_from_flat_models(flat_models=flat_models, model_name_map=model_name_map) - - -def get_model_definitions_v2(request_schema: Type[BaseModel], response_schema: Type[BaseModel]) -> Dict[str, Any]: - return { - "RequestSchema": request_schema.model_json_schema(), # type: ignore - "ResponseSchema": response_schema.model_json_schema(), # type: ignore - } - - -if PYDANTIC_V2: - get_model_definitions: Callable = get_model_definitions_v2 # type: ignore -else: - get_model_definitions: Callable = get_model_definitions_v1 # type: ignore - - -def get_model_definitions_from_flat_models( - *, - flat_models: Set[Union[Type[BaseModel], Type[Enum]]], - model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str], -) -> Dict[str, Any]: - """ - Gets the model schemas in jsonschema format from a set of Pydantic BaseModels (or Enums). - Inspired by https://github.com/tiangolo/fastapi/blob/99d8470a8e1cf76da8c5274e4e372630efc95736/fastapi/utils.py#L38 - - Args: - flat_models (Set[Union[Type[BaseModel], Type[Enum]]]): The models. - model_name_map (Dict[Union[Type[BaseModel], Type[Enum]], str]): The map from model to name. - - Returns: - Dict[str, Any]: OpenAPI-compatible schema of model definitions. 
- """ - definitions: Dict[str, Dict[str, Any]] = {} - for model in flat_models: - m_schema, m_definitions, _ = model_process_schema(model, model_name_map=model_name_map, ref_prefix=REF_PREFIX) - definitions.update(m_definitions) - model_name = model_name_map[model] - if "description" in m_schema: - m_schema["description"] = m_schema["description"].split("\f")[0] - definitions[model_name] = m_schema - return definitions diff --git a/launch/request_validation.py b/launch/request_validation.py deleted file mode 100644 index 6a376f0d..00000000 --- a/launch/request_validation.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -Contains client-side validation functions -""" - -import logging - -logger = logging.getLogger(__name__) -logging.basicConfig() - - -def validate_task_request(url, args): - # A task request must have at least one of url or args, otherwise there's no input! - if url is None and args is None: - raise ValueError("Must specify at least one of url or args") - if url is not None and args is not None: - logger.warning("Passing both url and args to task request; args will be ignored") diff --git a/launch/retry_strategy.py b/launch/retry_strategy.py deleted file mode 100644 index eabc1309..00000000 --- a/launch/retry_strategy.py +++ /dev/null @@ -1,4 +0,0 @@ -# TODO: use retry library instead of custom code. Tenacity is one option. 
-class RetryStrategy: - statuses = {503, 524, 520, 504} - sleep_times = [1, 3, 9] diff --git a/launch/utils.py b/launch/utils.py deleted file mode 100644 index 08d4fb75..00000000 --- a/launch/utils.py +++ /dev/null @@ -1,9 +0,0 @@ -from typing import Any, Dict - - -def trim_kwargs(kwargs_dict: Dict[Any, Any]): - """ - Returns a copy of kwargs_dict with None values removed - """ - dict_copy = {k: v for k, v in kwargs_dict.items() if v is not None} - return dict_copy diff --git a/mkdocs.yml b/mkdocs.yml deleted file mode 100644 index 14005bf2..00000000 --- a/mkdocs.yml +++ /dev/null @@ -1,92 +0,0 @@ -site_name: Launch -site_description: Simple, modern and high performance machine learning deployment in python. -site_url: https://scaleapi.github.io/launch-python-client/ - -theme: - name: material - palette: - - scheme: default - primary: blue grey - accent: indigo - toggle: - icon: material/lightbulb - name: Switch to dark mode - - scheme: slate - primary: blue grey - accent: indigo - toggle: - icon: material/lightbulb-outline - name: Switch to light mode - features: - - search.suggest - - search.highlight - - content.tabs.link - - content.code.annotate - - content.code.copy - icon: - repo: fontawesome/brands/github-alt - logo: _static/launch-logo.svg - favicon: _static/favicon-32x32.png - language: en - -repo_name: scaleapi/launch-python-client -repo_url: https://github.com/scaleapi/launch-python-client -edit_uri: '' -nav: - - Introduction: index.md - - CLI: cli.md - - Concepts: - - concepts/overview.md - - concepts/model_bundles.md - - concepts/model_endpoints.md - - concepts/endpoint_predictions.md - - concepts/batch_jobs.md - - concepts/callbacks.md - - 'API Documentation': - - api/client.md - - api/llms.md - - api/model_bundles.md - - api/model_endpoints.md - - api/endpoint_predictions.md - - api/hooks.md - - Guides: - - guides/custom_docker_images.md - -markdown_extensions: - - toc: - permalink: true - - admonition - - pymdownx.details - - pymdownx.superfences - 
- pymdownx.highlight: - anchor_linenums: true - - pymdownx.inlinehilite - - pymdownx.snippets - - attr_list - - md_in_html - - mdx_include - - pymdownx.emoji: - emoji_index: !!python/name:materialx.emoji.twemoji - emoji_generator: !!python/name:materialx.emoji.to_svg - - pymdownx.tabbed: - alternate_style: true - -plugins: - - search - - mkdocstrings: - watch: [launch] - handlers: - python: - rendering: - show_root_heading: true - show_root_full_path: false - show_source: false - heading_level: 2 - merge_init_into_class: true - show_signature_annotations: true - separate_signature: true - - mkdocs-simple-hooks: - hooks: - on_pre_build: 'docs.plugins:on_pre_build' - on_files: 'docs.plugins:on_files' - on_page_markdown: 'docs.plugins:on_page_markdown' diff --git a/objects.inv b/objects.inv new file mode 100644 index 00000000..40da7244 Binary files /dev/null and b/objects.inv differ diff --git a/openapi.json b/openapi.json deleted file mode 100644 index 24818db9..00000000 --- a/openapi.json +++ /dev/null @@ -1,14645 +0,0 @@ -{ - "openapi": "3.0.3", - "info": { - "title": "launch", - "version": "1.0.0" - }, - "paths": { - "/v1/batch-jobs": { - "post": { - "summary": "Create Batch Job", - "description": "Runs a batch job.", - "operationId": "create_batch_job_v1_batch_jobs_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateBatchJobV1Request" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateBatchJobV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v1/batch-jobs/{batch_job_id}": { - "get": { - "summary": "Get Batch 
Job", - "description": "Gets a batch job.", - "operationId": "get_batch_job_v1_batch_jobs__batch_job_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "batch_job_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Batch Job Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetBatchJobV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "put": { - "summary": "Update Batch Job", - "description": "Updates a batch job.", - "operationId": "update_batch_job_v1_batch_jobs__batch_job_id__put", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "batch_job_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Batch Job Id" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateBatchJobV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateBatchJobV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/docker-image-batch-jobs": { - "post": { - "summary": "Create Docker Image Batch Job", - "operationId": "create_docker_image_batch_job_v1_docker_image_batch_jobs_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "requestBody": { - "required": true, - "content": { - 
"application/json": { - "schema": { - "$ref": "#/components/schemas/CreateDockerImageBatchJobV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateDockerImageBatchJobV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "get": { - "summary": "List Docker Image Batch Jobs", - "description": "Lists docker image batch jobs spawned by trigger with given ID", - "operationId": "list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "trigger_id", - "in": "query", - "required": false, - "schema": { - "title": "Trigger Id", - "type": "string", - "nullable": true - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListDockerImageBatchJobsV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/docker-image-batch-jobs/{batch_job_id}": { - "get": { - "summary": "Get Docker Image Batch Job", - "operationId": "get_docker_image_batch_job_v1_docker_image_batch_jobs__batch_job_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "batch_job_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Batch Job Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": 
"#/components/schemas/GetDockerImageBatchJobV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "put": { - "summary": "Update Docker Image Batch Job", - "operationId": "update_docker_image_batch_job_v1_docker_image_batch_jobs__batch_job_id__put", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "batch_job_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Batch Job Id" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateDockerImageBatchJobV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateDockerImageBatchJobV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/async-tasks": { - "post": { - "summary": "Create Async Inference Task", - "description": "Runs an async inference prediction.", - "operationId": "create_async_inference_task_v1_async_tasks_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_id", - "in": "query", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Id" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EndpointPredictV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": 
"#/components/schemas/CreateAsyncTaskV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/async-tasks/{task_id}": { - "get": { - "summary": "Get Async Inference Task", - "description": "Gets the status of an async inference task.", - "operationId": "get_async_inference_task_v1_async_tasks__task_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "task_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Task Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetAsyncTaskV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/sync-tasks": { - "post": { - "summary": "Create Sync Inference Task", - "description": "Runs a sync inference prediction.", - "operationId": "create_sync_inference_task_v1_sync_tasks_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_id", - "in": "query", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Id" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SyncEndpointPredictV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SyncEndpointPredictV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - 
"schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/streaming-tasks": { - "post": { - "summary": "Create Streaming Inference Task", - "description": "Runs a streaming inference prediction.", - "operationId": "create_streaming_inference_task_v1_streaming_tasks_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_id", - "in": "query", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Id" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SyncEndpointPredictV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": {} - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/model-bundles": { - "post": { - "summary": "Create Model Bundle", - "description": "Creates a ModelBundle for the current user.", - "operationId": "create_model_bundle_v1_model_bundles_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateModelBundleV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateModelBundleV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "get": { - "summary": "List Model Bundles", - "description": "Lists the ModelBundles owned by the current 
owner.", - "operationId": "list_model_bundles_v1_model_bundles_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_name", - "in": "query", - "required": false, - "schema": { - "title": "Model Name", - "type": "string", - "nullable": true - } - }, - { - "name": "order_by", - "in": "query", - "required": false, - "schema": { - "title": "Order By", - "$ref": "#/components/schemas/ModelBundleOrderBy", - "nullable": true - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListModelBundlesV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/model-bundles/clone-with-changes": { - "post": { - "summary": "Clone Model Bundle With Changes", - "description": "Creates a ModelBundle by cloning an existing one and then applying changes on top.", - "operationId": "clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CloneModelBundleV1Request" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateModelBundleV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v1/model-bundles/latest": { - "get": { - "summary": "Get Latest Model Bundle", - "description": "Gets the latest Model Bundle with the given name owned by the current 
owner.", - "operationId": "get_latest_model_bundle_v1_model_bundles_latest_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_name", - "in": "query", - "required": true, - "schema": { - "type": "string", - "title": "Model Name" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModelBundleV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/model-bundles/{model_bundle_id}": { - "get": { - "summary": "Get Model Bundle", - "description": "Gets the details for a given ModelBundle owned by the current owner.", - "operationId": "get_model_bundle_v1_model_bundles__model_bundle_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_bundle_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Model Bundle Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModelBundleV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/model-bundles": { - "post": { - "summary": "Create Model Bundle", - "description": "Creates a ModelBundle for the current user.", - "operationId": "create_model_bundle_v2_model_bundles_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": 
"#/components/schemas/CreateModelBundleV2Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateModelBundleV2Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "get": { - "summary": "List Model Bundles", - "description": "Lists the ModelBundles owned by the current owner.", - "operationId": "list_model_bundles_v2_model_bundles_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_name", - "in": "query", - "required": false, - "schema": { - "title": "Model Name", - "type": "string", - "nullable": true - } - }, - { - "name": "order_by", - "in": "query", - "required": false, - "schema": { - "title": "Order By", - "$ref": "#/components/schemas/ModelBundleOrderBy", - "nullable": true - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListModelBundlesV2Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/model-bundles/clone-with-changes": { - "post": { - "summary": "Clone Model Bundle With Changes", - "description": "Creates a ModelBundle by cloning an existing one and then applying changes on top.", - "operationId": "clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CloneModelBundleV2Request" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - 
"application/json": { - "schema": { - "$ref": "#/components/schemas/CreateModelBundleV2Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v2/model-bundles/latest": { - "get": { - "summary": "Get Latest Model Bundle", - "description": "Gets the latest Model Bundle with the given name owned by the current owner.", - "operationId": "get_latest_model_bundle_v2_model_bundles_latest_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_name", - "in": "query", - "required": true, - "schema": { - "type": "string", - "title": "Model Name" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModelBundleV2Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/model-bundles/{model_bundle_id}": { - "get": { - "summary": "Get Model Bundle", - "description": "Gets the details for a given ModelBundle owned by the current owner.", - "operationId": "get_model_bundle_v2_model_bundles__model_bundle_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_bundle_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Model Bundle Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModelBundleV2Response" - } - } - } - }, - "422": { - "description": "Validation Error", - 
"content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/model-endpoints": { - "post": { - "summary": "Create Model Endpoint", - "description": "Creates a Model for the current user.", - "operationId": "create_model_endpoint_v1_model_endpoints_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateModelEndpointV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateModelEndpointV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "get": { - "summary": "List Model Endpoints", - "description": "Lists the Models owned by the current owner.", - "operationId": "list_model_endpoints_v1_model_endpoints_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "name", - "in": "query", - "required": false, - "schema": { - "title": "Name", - "type": "string", - "nullable": true - } - }, - { - "name": "order_by", - "in": "query", - "required": false, - "schema": { - "title": "Order By", - "$ref": "#/components/schemas/ModelEndpointOrderBy", - "nullable": true - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListModelEndpointsV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - 
"/v1/model-endpoints/{model_endpoint_id}": { - "get": { - "summary": "Get Model Endpoint", - "description": "Describe the Model endpoint with given ID.", - "operationId": "get_model_endpoint_v1_model_endpoints__model_endpoint_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetModelEndpointV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "put": { - "summary": "Update Model Endpoint", - "description": "Updates the Model endpoint.", - "operationId": "update_model_endpoint_v1_model_endpoints__model_endpoint_id__put", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Id" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateModelEndpointV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateModelEndpointV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "delete": { - "summary": "Delete Model Endpoint", - "description": "Lists the Models owned by the current owner.", - "operationId": 
"delete_model_endpoint_v1_model_endpoints__model_endpoint_id__delete", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeleteModelEndpointV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/model-endpoints/{model_endpoint_id}/restart": { - "post": { - "summary": "Restart Model Endpoint", - "description": "Restarts the Model endpoint.", - "operationId": "restart_model_endpoint_v1_model_endpoints__model_endpoint_id__restart_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RestartModelEndpointV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/model-endpoints-schema.json": { - "get": { - "summary": "Get Model Endpoints Schema", - "description": "Lists the schemas of the Model Endpoints owned by the current owner.", - "operationId": "get_model_endpoints_schema_v1_model_endpoints_schema_json_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": {} - } - } - } - }, - 
"security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v1/model-endpoints-api": { - "get": { - "summary": "Get Model Endpoints Api", - "description": "Shows the API of the Model Endpoints owned by the current owner.", - "operationId": "get_model_endpoints_api_v1_model_endpoints_api_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": {} - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v1/docker-image-batch-job-bundles": { - "post": { - "summary": "Create Docker Image Batch Job Bundle", - "description": "Creates a docker iamge batch job bundle", - "operationId": "create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateDockerImageBatchJobBundleV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateDockerImageBatchJobBundleV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "get": { - "summary": "List Docker Image Batch Job Model Bundles", - "description": "Lists docker image batch job bundles owned by current owner", - "operationId": "list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "bundle_name", - "in": "query", - "required": false, - "schema": { - "title": "Bundle Name", - "type": "string", - "nullable": true - } - }, - { - 
"name": "order_by", - "in": "query", - "required": false, - "schema": { - "title": "Order By", - "$ref": "#/components/schemas/ModelBundleOrderBy", - "nullable": true - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListDockerImageBatchJobBundleV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/docker-image-batch-job-bundles/latest": { - "get": { - "summary": "Get Latest Docker Image Batch Job Bundle", - "description": "Gets latest Docker Image Batch Job Bundle with given name owned by the current owner", - "operationId": "get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "bundle_name", - "in": "query", - "required": true, - "schema": { - "type": "string", - "title": "Bundle Name" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DockerImageBatchJobBundleV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/docker-image-batch-job-bundles/{docker_image_batch_job_bundle_id}": { - "get": { - "summary": "Get Docker Image Batch Job Model Bundle", - "description": "Get details for a given DockerImageBatchJobBundle owned by the current owner", - "operationId": "get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles__docker_image_batch_job_bundle_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - 
"parameters": [ - { - "name": "docker_image_batch_job_bundle_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Docker Image Batch Job Bundle Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DockerImageBatchJobBundleV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/llm/model-endpoints": { - "post": { - "summary": "Create Model Endpoint", - "operationId": "create_model_endpoint_v1_llm_model_endpoints_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateLLMModelEndpointV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateLLMModelEndpointV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "get": { - "summary": "List Model Endpoints", - "description": "Lists the LLM model endpoints owned by the current owner, plus all public_inference LLMs.", - "operationId": "list_model_endpoints_v1_llm_model_endpoints_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "name", - "in": "query", - "required": false, - "schema": { - "title": "Name", - "type": "string", - "nullable": true - } - }, - { - "name": "order_by", - "in": "query", - "required": false, - "schema": { - "title": "Order By", - "$ref": 
"#/components/schemas/ModelEndpointOrderBy", - "nullable": true - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListLLMModelEndpointsV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/llm/model-endpoints/{model_endpoint_name}": { - "get": { - "summary": "Get Model Endpoint", - "description": "Describe the LLM Model endpoint with given name.", - "operationId": "get_model_endpoint_v1_llm_model_endpoints__model_endpoint_name__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_name", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Name" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetLLMModelEndpointV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "put": { - "summary": "Update Model Endpoint", - "description": "Updates an LLM endpoint for the current user.", - "operationId": "update_model_endpoint_v1_llm_model_endpoints__model_endpoint_name__put", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_name", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Name" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateLLMModelEndpointV1Request" - } - } - } - }, - 
"responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateLLMModelEndpointV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "delete": { - "summary": "Delete Llm Model Endpoint", - "operationId": "delete_llm_model_endpoint_v1_llm_model_endpoints__model_endpoint_name__delete", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_name", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Name" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeleteLLMEndpointResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/llm/completions-sync": { - "post": { - "summary": "Create Completion Sync Task", - "description": "Runs a sync prompt completion on an LLM.", - "operationId": "create_completion_sync_task_v1_llm_completions_sync_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_name", - "in": "query", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Name" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CompletionSyncV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CompletionSyncV1Response" 
- } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/llm/completions-stream": { - "post": { - "summary": "Create Completion Stream Task", - "description": "Runs a stream prompt completion on an LLM.", - "operationId": "create_completion_stream_task_v1_llm_completions_stream_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "model_endpoint_name", - "in": "query", - "required": true, - "schema": { - "type": "string", - "title": "Model Endpoint Name" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CompletionStreamV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CompletionStreamV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/llm/fine-tunes": { - "get": { - "summary": "List Fine Tunes", - "operationId": "list_fine_tunes_v1_llm_fine_tunes_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListFineTunesResponse" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - }, - "post": { - "summary": "Create Fine Tune", - "operationId": "create_fine_tune_v1_llm_fine_tunes_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateFineTuneRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - 
"content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateFineTuneResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v1/llm/fine-tunes/{fine_tune_id}": { - "get": { - "summary": "Get Fine Tune", - "operationId": "get_fine_tune_v1_llm_fine_tunes__fine_tune_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "fine_tune_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Fine Tune Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetFineTuneResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/llm/fine-tunes/{fine_tune_id}/cancel": { - "put": { - "summary": "Cancel Fine Tune", - "operationId": "cancel_fine_tune_v1_llm_fine_tunes__fine_tune_id__cancel_put", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "fine_tune_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Fine Tune Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CancelFineTuneResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/llm/fine-tunes/{fine_tune_id}/events": 
{ - "get": { - "summary": "Get Fine Tune Events", - "operationId": "get_fine_tune_events_v1_llm_fine_tunes__fine_tune_id__events_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "fine_tune_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Fine Tune Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetFineTuneEventsResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/llm/model-endpoints/download": { - "post": { - "summary": "Download Model Endpoint", - "operationId": "download_model_endpoint_v1_llm_model_endpoints_download_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModelDownloadRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModelDownloadResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v1/llm/batch-completions": { - "post": { - "summary": "Create Batch Completions", - "operationId": "create_batch_completions_v1_llm_batch_completions_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateBatchCompletionsV1Request" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - 
"schema": { - "$ref": "#/components/schemas/CreateBatchCompletionsV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v1/files": { - "get": { - "summary": "List Files", - "operationId": "list_files_v1_files_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListFilesResponse" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - }, - "post": { - "summary": "Upload File", - "operationId": "upload_file_v1_files_post", - "requestBody": { - "content": { - "multipart/form-data": { - "schema": { - "$ref": "#/components/schemas/Body_upload_file_v1_files_post" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UploadFileResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v1/files/{file_id}": { - "get": { - "summary": "Get File", - "operationId": "get_file_v1_files__file_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "file_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "File Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetFileResponse" - } - } - } - }, - 
"422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "delete": { - "summary": "Delete File", - "operationId": "delete_file_v1_files__file_id__delete", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "file_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "File Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeleteFileResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/files/{file_id}/content": { - "get": { - "summary": "Get File Content", - "description": "Describe the LLM Model endpoint with given name.", - "operationId": "get_file_content_v1_files__file_id__content_get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "file_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "File Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetFileContentResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v1/triggers": { - "get": { - "summary": "List Triggers", - "description": "Lists descriptions of all triggers", - "operationId": "list_triggers_v1_triggers_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - 
"$ref": "#/components/schemas/ListTriggersV1Response" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - }, - "post": { - "summary": "Create Trigger", - "description": "Creates and runs a trigger", - "operationId": "create_trigger_v1_triggers_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateTriggerV1Request" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateTriggerV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v1/triggers/{trigger_id}": { - "get": { - "summary": "Get Trigger", - "description": "Describes the trigger with the given ID", - "operationId": "get_trigger_v1_triggers__trigger_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "trigger_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Trigger Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetTriggerV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "put": { - "summary": "Update Trigger", - "description": "Updates the trigger with the given ID", - "operationId": "update_trigger_v1_triggers__trigger_id__put", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - 
"parameters": [ - { - "name": "trigger_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Trigger Id" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateTriggerV1Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateTriggerV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "delete": { - "summary": "Delete Trigger", - "description": "Deletes the trigger with the given ID", - "operationId": "delete_trigger_v1_triggers__trigger_id__delete", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "trigger_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Trigger Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeleteTriggerV1Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/batch-completions": { - "post": { - "summary": "Batch Completions", - "operationId": "batch_completions_v2_batch_completions_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateBatchCompletionsV2Request" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/BatchCompletionsJob" - } - } - } - }, - "422": { - 
"description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v2/batch-completions/{batch_completion_id}": { - "get": { - "summary": "Get Batch Completion", - "operationId": "get_batch_completion_v2_batch_completions__batch_completion_id__get", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "batch_completion_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Batch Completion Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetBatchCompletionV2Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "post": { - "summary": "Update Batch Completion", - "operationId": "update_batch_completion_v2_batch_completions__batch_completion_id__post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "batch_completion_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Batch Completion Id" - } - } - ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateBatchCompletionsV2Request" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateBatchCompletionsV2Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": 
"#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/batch-completions/{batch_completion_id}/actions/cancel": { - "post": { - "summary": "Cancel Batch Completion", - "operationId": "cancel_batch_completion_v2_batch_completions__batch_completion_id__actions_cancel_post", - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ], - "parameters": [ - { - "name": "batch_completion_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Batch Completion Id" - } - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CancelBatchCompletionsV2Response" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/chat/completions": { - "post": { - "summary": "Chat Completion", - "operationId": "chat_completion_v2_chat_completions_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ChatCompletionV2Request" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "anyOf": [ - { - "$ref": "#/components/schemas/CreateChatCompletionResponse" - }, - { - "$ref": "#/components/schemas/CreateChatCompletionStreamResponse" - }, - { - "$ref": "#/components/schemas/ChatCompletionV2StreamErrorChunk" - } - ], - "title": "Response Chat Completion V2 Chat Completions Post" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/v2/completions": { - "post": { - "summary": 
"Completion", - "operationId": "completion_v2_completions_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CompletionV2Request" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "anyOf": [ - { - "$ref": "#/components/schemas/CreateCompletionResponse" - }, - { - "$ref": "#/components/schemas/CompletionV2StreamErrorChunk" - } - ], - "title": "Response Completion V2 Completions Post" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - }, - "security": [ - { - "HTTPBasic": [] - }, - { - "OAuth2PasswordBearer": [] - } - ] - } - }, - "/healthcheck": { - "get": { - "summary": "Healthcheck", - "description": "Returns 200 if the app is healthy.", - "operationId": "healthcheck_healthcheck_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": {} - } - } - } - } - } - }, - "/healthz": { - "get": { - "summary": "Healthcheck", - "description": "Returns 200 if the app is healthy.", - "operationId": "healthcheck_healthz_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": {} - } - } - } - } - } - }, - "/readyz": { - "get": { - "summary": "Healthcheck", - "description": "Returns 200 if the app is healthy.", - "operationId": "healthcheck_readyz_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": {} - } - } - } - } - } - } - }, - "components": { - "schemas": { - "Annotation": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of the URL citation. 
Always `url_citation`.", - "enum": [ - "url_citation" - ] - }, - "url_citation": { - "$ref": "#/components/schemas/UrlCitation", - "description": "A URL citation when using web search." - } - }, - "type": "object", - "required": [ - "type", - "url_citation" - ], - "title": "Annotation" - }, - "Audio": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "Unique identifier for a previous audio response from the model.\n" - } - }, - "type": "object", - "required": [ - "id" - ], - "title": "Audio" - }, - "Audio1": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "Unique identifier for this audio response." - }, - "expires_at": { - "type": "integer", - "title": "Expires At", - "description": "The Unix timestamp (in seconds) for when this audio response will\nno longer be accessible on the server for use in multi-turn\nconversations.\n" - }, - "data": { - "type": "string", - "title": "Data", - "description": "Base64 encoded audio bytes generated by the model, in the format\nspecified in the request.\n" - }, - "transcript": { - "type": "string", - "title": "Transcript", - "description": "Transcript of the audio generated by the model." - } - }, - "type": "object", - "required": [ - "id", - "expires_at", - "data", - "transcript" - ], - "title": "Audio1" - }, - "Audio2": { - "properties": { - "voice": { - "$ref": "#/components/schemas/VoiceIdsShared", - "description": "The voice the model uses to respond. Supported voices are \n`alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.\n" - }, - "format": { - "type": "string", - "enum": [ - "wav", - "aac", - "mp3", - "flac", - "opus", - "pcm16" - ], - "title": "Format", - "description": "Specifies the output audio format. 
Must be one of `wav`, `mp3`, `flac`,\n`opus`, or `pcm16`.\n" - } - }, - "type": "object", - "required": [ - "voice", - "format" - ], - "title": "Audio2" - }, - "BatchCompletionsJob": { - "properties": { - "job_id": { - "type": "string", - "title": "Job Id" - }, - "input_data_path": { - "title": "Input Data Path", - "description": "Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].", - "type": "string", - "nullable": true - }, - "output_data_path": { - "type": "string", - "title": "Output Data Path", - "description": "Path to the output file. The output file will be a JSON file of type List[CompletionOutput]." - }, - "model_config": { - "$ref": "#/components/schemas/BatchCompletionsModelConfig", - "description": "Model configuration for the batch inference. Hardware configurations are inferred." - }, - "priority": { - "title": "Priority", - "description": "Priority of the batch inference job. Default to None.", - "type": "string", - "nullable": true - }, - "status": { - "$ref": "#/components/schemas/BatchCompletionsJobStatus" - }, - "created_at": { - "type": "string", - "title": "Created At" - }, - "expires_at": { - "type": "string", - "title": "Expires At" - }, - "completed_at": { - "title": "Completed At", - "type": "string", - "nullable": true - }, - "metadata": { - "title": "Metadata", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - } - }, - "type": "object", - "required": [ - "job_id", - "output_data_path", - "model_config", - "status", - "created_at", - "expires_at", - "completed_at", - "metadata" - ], - "title": "BatchCompletionsJob" - }, - "BatchCompletionsJobStatus": { - "type": "string", - "enum": [ - "queued", - "running", - "completed", - "failed", - "cancelled", - "unknown" - ], - "title": "BatchCompletionsJobStatus" - }, - "BatchCompletionsModelConfig": { - "properties": { - "max_model_len": { - "title": "Max Model Len", - "description": "Model 
context length, If unspecified, will be automatically derived from the model config", - "type": "integer", - "nullable": true - }, - "max_num_seqs": { - "title": "Max Num Seqs", - "description": "Maximum number of sequences per iteration", - "type": "integer", - "nullable": true - }, - "enforce_eager": { - "title": "Enforce Eager", - "description": "Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal perforamnce and flexibility", - "type": "boolean", - "nullable": true - }, - "trust_remote_code": { - "title": "Trust Remote Code", - "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False.", - "default": false, - "type": "boolean", - "nullable": true - }, - "pipeline_parallel_size": { - "title": "Pipeline Parallel Size", - "description": "Number of pipeline stages. Default to None.", - "type": "integer", - "nullable": true - }, - "tensor_parallel_size": { - "title": "Tensor Parallel Size", - "description": "Number of tensor parallel replicas. Default to None.", - "type": "integer", - "nullable": true - }, - "quantization": { - "title": "Quantization", - "description": "Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.", - "type": "string", - "nullable": true - }, - "disable_log_requests": { - "title": "Disable Log Requests", - "description": "Disable logging requests. Default to None.", - "type": "boolean", - "nullable": true - }, - "chat_template": { - "title": "Chat Template", - "description": "A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "tool_call_parser": { - "title": "Tool Call Parser", - "description": "Tool call parser", - "type": "string", - "nullable": true - }, - "enable_auto_tool_choice": { - "title": "Enable Auto Tool Choice", - "description": "Enable auto tool choice", - "type": "boolean", - "nullable": true - }, - "load_format": { - "title": "Load Format", - "description": "The format of the model weights to load.\n\n* \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.\n* \"pt\" will load the weights in the pytorch bin format.\n* \"safetensors\" will load the weights in the safetensors format.\n* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading.\n* \"dummy\" will initialize the weights with random values, which is mainly for profiling.\n* \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information.\n* \"bitsandbytes\" will load the weights using bitsandbytes quantization.\n", - "type": "string", - "nullable": true - }, - "config_format": { - "title": "Config Format", - "description": "The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'.", - "type": "string", - "nullable": true - }, - "tokenizer_mode": { - "title": "Tokenizer Mode", - "description": "Tokenizer mode. 'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`.", - "type": "string", - "nullable": true - }, - "limit_mm_per_prompt": { - "title": "Limit Mm Per Prompt", - "description": "Maximum number of data instances per modality per prompt. 
Only applicable for multimodal models.", - "type": "string", - "nullable": true - }, - "max_num_batched_tokens": { - "title": "Max Num Batched Tokens", - "description": "Maximum number of batched tokens per iteration", - "type": "integer", - "nullable": true - }, - "tokenizer": { - "title": "Tokenizer", - "description": "Name or path of the huggingface tokenizer to use.", - "type": "string", - "nullable": true - }, - "dtype": { - "title": "Dtype", - "description": "Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.", - "type": "string", - "nullable": true - }, - "seed": { - "title": "Seed", - "description": "Random seed for the model.", - "type": "integer", - "nullable": true - }, - "revision": { - "title": "Revision", - "description": "The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "code_revision": { - "title": "Code Revision", - "description": "The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "rope_scaling": { - "title": "Rope Scaling", - "description": "Dictionary containing the scaling configuration for the RoPE embeddings. When using this flag, don't update `max_position_embeddings` to the expected new maximum.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "tokenizer_revision": { - "title": "Tokenizer Revision", - "description": "The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. 
If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "quantization_param_path": { - "title": "Quantization Param Path", - "description": "Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm.", - "type": "string", - "nullable": true - }, - "max_seq_len_to_capture": { - "title": "Max Seq Len To Capture", - "description": "Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode.", - "type": "integer", - "nullable": true - }, - "disable_sliding_window": { - "title": "Disable Sliding Window", - "description": "Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored.", - "type": "boolean", - "nullable": true - }, - "skip_tokenizer_init": { - "title": "Skip Tokenizer Init", - "description": "If true, skip initialization of tokenizer and detokenizer.", - "type": "boolean", - "nullable": true - }, - "served_model_name": { - "title": "Served Model Name", - "description": "The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. 
If not specified, the model name will be the same as `model`.", - "type": "string", - "nullable": true - }, - "override_neuron_config": { - "title": "Override Neuron Config", - "description": "Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "mm_processor_kwargs": { - "title": "Mm Processor Kwargs", - "description": "Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "block_size": { - "title": "Block Size", - "description": "Size of a cache block in number of tokens.", - "type": "integer", - "nullable": true - }, - "gpu_memory_utilization": { - "title": "Gpu Memory Utilization", - "description": "Fraction of GPU memory to use for the vLLM execution.", - "type": "number", - "nullable": true - }, - "swap_space": { - "title": "Swap Space", - "description": "Size of the CPU swap space per GPU (in GiB).", - "type": "number", - "nullable": true - }, - "cache_dtype": { - "title": "Cache Dtype", - "description": "Data type for kv cache storage.", - "type": "string", - "nullable": true - }, - "num_gpu_blocks_override": { - "title": "Num Gpu Blocks Override", - "description": "Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. 
Does nothing if None.", - "type": "integer", - "nullable": true - }, - "enable_prefix_caching": { - "title": "Enable Prefix Caching", - "description": "Enables automatic prefix caching.", - "type": "boolean", - "nullable": true - }, - "model": { - "type": "string", - "title": "Model", - "description": "ID of the model to use.", - "example": "mixtral-8x7b-instruct" - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "description": "Path to the checkpoint to load the model from.", - "type": "string", - "nullable": true - }, - "num_shards": { - "title": "Num Shards", - "description": "\nSuggested number of shards to distribute the model. When not specified, will infer the number of shards based on model config.\nSystem may decide to use a different number than the given value.\n", - "default": 1, - "type": "integer", - "minimum": 1.0, - "nullable": true - }, - "max_context_length": { - "title": "Max Context Length", - "description": "Maximum context length to use for the model. Defaults to the max allowed by the model. Deprecated in favor of max_model_len.", - "type": "integer", - "minimum": 1.0, - "nullable": true - }, - "response_role": { - "title": "Response Role", - "description": "Role of the response in the conversation. 
Only supported in chat completions.", - "type": "string", - "nullable": true - } - }, - "type": "object", - "required": [ - "model" - ], - "title": "BatchCompletionsModelConfig" - }, - "BatchJobSerializationFormat": { - "type": "string", - "enum": [ - "JSON", - "PICKLE" - ], - "title": "BatchJobSerializationFormat" - }, - "BatchJobStatus": { - "type": "string", - "enum": [ - "PENDING", - "RUNNING", - "SUCCESS", - "FAILURE", - "CANCELLED", - "UNDEFINED", - "TIMEOUT" - ], - "title": "BatchJobStatus" - }, - "Body_upload_file_v1_files_post": { - "properties": { - "file": { - "type": "string", - "format": "binary", - "title": "File" - } - }, - "type": "object", - "required": [ - "file" - ], - "title": "Body_upload_file_v1_files_post" - }, - "CallbackAuth": { - "oneOf": [ - { - "$ref": "#/components/schemas/CallbackBasicAuth" - }, - { - "$ref": "#/components/schemas/CallbackmTLSAuth" - } - ], - "title": "CallbackAuth", - "discriminator": { - "propertyName": "kind", - "mapping": { - "basic": "#/components/schemas/CallbackBasicAuth", - "mtls": "#/components/schemas/CallbackmTLSAuth" - } - } - }, - "CallbackBasicAuth": { - "properties": { - "kind": { - "type": "string", - "title": "Kind", - "enum": [ - "basic" - ] - }, - "username": { - "type": "string", - "title": "Username" - }, - "password": { - "type": "string", - "title": "Password" - } - }, - "type": "object", - "required": [ - "kind", - "username", - "password" - ], - "title": "CallbackBasicAuth" - }, - "CallbackmTLSAuth": { - "properties": { - "kind": { - "type": "string", - "title": "Kind", - "enum": [ - "mtls" - ] - }, - "cert": { - "type": "string", - "title": "Cert" - }, - "key": { - "type": "string", - "title": "Key" - } - }, - "type": "object", - "required": [ - "kind", - "cert", - "key" - ], - "title": "CallbackmTLSAuth" - }, - "CancelBatchCompletionsV2Response": { - "properties": { - "success": { - "type": "boolean", - "title": "Success", - "description": "Whether the cancellation was successful" - } - }, - 
"type": "object", - "required": [ - "success" - ], - "title": "CancelBatchCompletionsV2Response" - }, - "CancelFineTuneResponse": { - "properties": { - "success": { - "type": "boolean", - "title": "Success" - } - }, - "type": "object", - "required": [ - "success" - ], - "title": "CancelFineTuneResponse" - }, - "ChatCompletionFunctionCallOption": { - "properties": { - "name": { - "type": "string", - "title": "Name", - "description": "The name of the function to call." - } - }, - "type": "object", - "required": [ - "name" - ], - "title": "ChatCompletionFunctionCallOption" - }, - "ChatCompletionFunctions": { - "properties": { - "description": { - "title": "Description", - "description": "A description of what the function does, used by the model to choose when and how to call the function.", - "type": "string", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name", - "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." - }, - "parameters": { - "$ref": "#/components/schemas/FunctionParameters", - "nullable": true - } - }, - "type": "object", - "required": [ - "name" - ], - "title": "ChatCompletionFunctions" - }, - "ChatCompletionMessageToolCall": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "The ID of the tool call." - }, - "type": { - "type": "string", - "title": "Type", - "description": "The type of the tool. Currently, only `function` is supported.", - "enum": [ - "function" - ] - }, - "function": { - "$ref": "#/components/schemas/Function1", - "description": "The function that the model called." 
- } - }, - "type": "object", - "required": [ - "id", - "type", - "function" - ], - "title": "ChatCompletionMessageToolCall" - }, - "ChatCompletionMessageToolCallChunk": { - "properties": { - "index": { - "type": "integer", - "title": "Index" - }, - "id": { - "title": "Id", - "description": "The ID of the tool call.", - "type": "string", - "nullable": true - }, - "type": { - "title": "Type", - "description": "The type of the tool. Currently, only `function` is supported.", - "type": "string", - "nullable": true, - "enum": [ - "function" - ] - }, - "function": { - "$ref": "#/components/schemas/Function2", - "nullable": true - } - }, - "type": "object", - "required": [ - "index" - ], - "title": "ChatCompletionMessageToolCallChunk" - }, - "ChatCompletionMessageToolCalls-Input": { - "items": { - "$ref": "#/components/schemas/ChatCompletionMessageToolCall" - }, - "type": "array", - "title": "ChatCompletionMessageToolCalls", - "description": "The tool calls generated by the model, such as function calls." - }, - "ChatCompletionMessageToolCalls-Output": { - "items": { - "$ref": "#/components/schemas/ChatCompletionMessageToolCall" - }, - "type": "array", - "title": "ChatCompletionMessageToolCalls", - "description": "The tool calls generated by the model, such as function calls." - }, - "ChatCompletionNamedToolChoice": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of the tool. Currently, only `function` is supported.", - "enum": [ - "function" - ] - }, - "function": { - "$ref": "#/components/schemas/Function3" - } - }, - "type": "object", - "required": [ - "type", - "function" - ], - "title": "ChatCompletionNamedToolChoice" - }, - "ChatCompletionRequestAssistantMessage": { - "properties": { - "content": { - "anyOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/Content" - } - ], - "title": "Content", - "description": "The contents of the assistant message. 
Required unless `tool_calls` or `function_call` is specified.\n", - "nullable": true - }, - "refusal": { - "title": "Refusal", - "description": "The refusal message by the assistant.", - "type": "string", - "nullable": true - }, - "role": { - "type": "string", - "title": "Role", - "description": "The role of the messages author, in this case `assistant`.", - "enum": [ - "assistant" - ] - }, - "name": { - "title": "Name", - "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.", - "type": "string", - "nullable": true - }, - "audio": { - "description": "Data about a previous audio response from the model. \n[Learn more](/docs/guides/audio).\n", - "$ref": "#/components/schemas/Audio", - "nullable": true - }, - "tool_calls": { - "$ref": "#/components/schemas/ChatCompletionMessageToolCalls-Input", - "nullable": true - }, - "function_call": { - "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", - "$ref": "#/components/schemas/FunctionCall", - "nullable": true - } - }, - "type": "object", - "required": [ - "role" - ], - "title": "ChatCompletionRequestAssistantMessage" - }, - "ChatCompletionRequestAssistantMessageContentPart": { - "anyOf": [ - { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText" - }, - { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" - } - ], - "title": "ChatCompletionRequestAssistantMessageContentPart" - }, - "ChatCompletionRequestDeveloperMessage": { - "properties": { - "content": { - "anyOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/Content1" - } - ], - "title": "Content", - "description": "The contents of the developer message." 
- }, - "role": { - "type": "string", - "title": "Role", - "description": "The role of the messages author, in this case `developer`.", - "enum": [ - "developer" - ] - }, - "name": { - "title": "Name", - "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.", - "type": "string", - "nullable": true - } - }, - "type": "object", - "required": [ - "content", - "role" - ], - "title": "ChatCompletionRequestDeveloperMessage" - }, - "ChatCompletionRequestFunctionMessage": { - "properties": { - "role": { - "type": "string", - "title": "Role", - "description": "The role of the messages author, in this case `function`.", - "enum": [ - "function" - ] - }, - "content": { - "title": "Content", - "description": "The contents of the function message.", - "type": "string", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name", - "description": "The name of the function to call." - } - }, - "type": "object", - "required": [ - "role", - "name" - ], - "title": "ChatCompletionRequestFunctionMessage" - }, - "ChatCompletionRequestMessage": { - "anyOf": [ - { - "$ref": "#/components/schemas/ChatCompletionRequestDeveloperMessage" - }, - { - "$ref": "#/components/schemas/ChatCompletionRequestSystemMessage" - }, - { - "$ref": "#/components/schemas/ChatCompletionRequestUserMessage" - }, - { - "$ref": "#/components/schemas/ChatCompletionRequestAssistantMessage" - }, - { - "$ref": "#/components/schemas/ChatCompletionRequestToolMessage" - }, - { - "$ref": "#/components/schemas/ChatCompletionRequestFunctionMessage" - } - ], - "title": "ChatCompletionRequestMessage" - }, - "ChatCompletionRequestMessageContentPartAudio": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of the content part. 
Always `input_audio`.", - "enum": [ - "input_audio" - ] - }, - "input_audio": { - "$ref": "#/components/schemas/InputAudio" - } - }, - "type": "object", - "required": [ - "type", - "input_audio" - ], - "title": "ChatCompletionRequestMessageContentPartAudio" - }, - "ChatCompletionRequestMessageContentPartFile": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of the content part. Always `file`.", - "enum": [ - "file" - ] - }, - "file": { - "$ref": "#/components/schemas/File" - } - }, - "type": "object", - "required": [ - "type", - "file" - ], - "title": "ChatCompletionRequestMessageContentPartFile" - }, - "ChatCompletionRequestMessageContentPartImage": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of the content part.", - "enum": [ - "image_url" - ] - }, - "image_url": { - "$ref": "#/components/schemas/ImageUrl" - } - }, - "type": "object", - "required": [ - "type", - "image_url" - ], - "title": "ChatCompletionRequestMessageContentPartImage" - }, - "ChatCompletionRequestMessageContentPartRefusal": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of the content part.", - "enum": [ - "refusal" - ] - }, - "refusal": { - "type": "string", - "title": "Refusal", - "description": "The refusal message generated by the model." - } - }, - "type": "object", - "required": [ - "type", - "refusal" - ], - "title": "ChatCompletionRequestMessageContentPartRefusal" - }, - "ChatCompletionRequestMessageContentPartText": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of the content part.", - "enum": [ - "text" - ] - }, - "text": { - "type": "string", - "title": "Text", - "description": "The text content." 
- } - }, - "type": "object", - "required": [ - "type", - "text" - ], - "title": "ChatCompletionRequestMessageContentPartText" - }, - "ChatCompletionRequestSystemMessage": { - "properties": { - "content": { - "anyOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/Content2" - } - ], - "title": "Content", - "description": "The contents of the system message." - }, - "role": { - "type": "string", - "title": "Role", - "description": "The role of the messages author, in this case `system`.", - "enum": [ - "system" - ] - }, - "name": { - "title": "Name", - "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.", - "type": "string", - "nullable": true - } - }, - "type": "object", - "required": [ - "content", - "role" - ], - "title": "ChatCompletionRequestSystemMessage" - }, - "ChatCompletionRequestSystemMessageContentPart": { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText", - "title": "ChatCompletionRequestSystemMessageContentPart" - }, - "ChatCompletionRequestToolMessage": { - "properties": { - "role": { - "type": "string", - "title": "Role", - "description": "The role of the messages author, in this case `tool`.", - "enum": [ - "tool" - ] - }, - "content": { - "anyOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/Content3" - } - ], - "title": "Content", - "description": "The contents of the tool message." - }, - "tool_call_id": { - "type": "string", - "title": "Tool Call Id", - "description": "Tool call that this message is responding to." 
- } - }, - "type": "object", - "required": [ - "role", - "content", - "tool_call_id" - ], - "title": "ChatCompletionRequestToolMessage" - }, - "ChatCompletionRequestToolMessageContentPart": { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText", - "title": "ChatCompletionRequestToolMessageContentPart" - }, - "ChatCompletionRequestUserMessage": { - "properties": { - "content": { - "anyOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/Content4" - } - ], - "title": "Content", - "description": "The contents of the user message.\n" - }, - "role": { - "type": "string", - "title": "Role", - "description": "The role of the messages author, in this case `user`.", - "enum": [ - "user" - ] - }, - "name": { - "title": "Name", - "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.", - "type": "string", - "nullable": true - } - }, - "type": "object", - "required": [ - "content", - "role" - ], - "title": "ChatCompletionRequestUserMessage" - }, - "ChatCompletionRequestUserMessageContentPart": { - "anyOf": [ - { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText" - }, - { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartImage" - }, - { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartAudio" - }, - { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartFile" - } - ], - "title": "ChatCompletionRequestUserMessageContentPart" - }, - "ChatCompletionResponseMessage": { - "properties": { - "content": { - "title": "Content", - "description": "The contents of the message.", - "type": "string", - "nullable": true - }, - "refusal": { - "title": "Refusal", - "description": "The refusal message generated by the model.", - "type": "string", - "nullable": true - }, - "tool_calls": { - "$ref": "#/components/schemas/ChatCompletionMessageToolCalls-Output", - "nullable": true - }, - 
"annotations": { - "title": "Annotations", - "description": "Annotations for the message, when applicable, as when using the\n[web search tool](/docs/guides/tools-web-search?api-mode=chat).\n", - "items": { - "$ref": "#/components/schemas/Annotation" - }, - "type": "array", - "nullable": true - }, - "role": { - "type": "string", - "title": "Role", - "description": "The role of the author of this message.", - "enum": [ - "assistant" - ] - }, - "function_call": { - "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", - "$ref": "#/components/schemas/FunctionCall", - "nullable": true - }, - "audio": { - "description": "If the audio output modality is requested, this object contains data\nabout the audio response from the model. [Learn more](/docs/guides/audio).\n", - "$ref": "#/components/schemas/Audio1", - "nullable": true - } - }, - "type": "object", - "required": [ - "role" - ], - "title": "ChatCompletionResponseMessage" - }, - "ChatCompletionStreamOptions": { - "properties": { - "include_usage": { - "title": "Include Usage", - "description": "If set, an additional chunk will be streamed before the `data: [DONE]`\nmessage. The `usage` field on this chunk shows the token usage statistics\nfor the entire request, and the `choices` field will always be an empty\narray. \n\nAll other chunks will also include a `usage` field, but with a null\nvalue. **NOTE:** If the stream is interrupted, you may not receive the\nfinal usage chunk which contains the total token usage for the request.\n", - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "title": "ChatCompletionStreamOptions" - }, - "ChatCompletionStreamResponseDelta": { - "properties": { - "content": { - "title": "Content", - "description": "The contents of the chunk message.", - "type": "string", - "nullable": true - }, - "function_call": { - "description": "Deprecated and replaced by `tool_calls`. 
The name and arguments of a function that should be called, as generated by the model.", - "$ref": "#/components/schemas/FunctionCall2", - "nullable": true - }, - "tool_calls": { - "title": "Tool Calls", - "items": { - "$ref": "#/components/schemas/ChatCompletionMessageToolCallChunk" - }, - "type": "array", - "nullable": true - }, - "role": { - "title": "Role", - "description": "The role of the author of this message.", - "type": "string", - "enum": [ - "developer", - "system", - "user", - "assistant", - "tool" - ], - "nullable": true - }, - "refusal": { - "title": "Refusal", - "description": "The refusal message generated by the model.", - "type": "string", - "nullable": true - } - }, - "type": "object", - "title": "ChatCompletionStreamResponseDelta" - }, - "ChatCompletionTokenLogprob": { - "properties": { - "token": { - "type": "string", - "title": "Token", - "description": "The token." - }, - "logprob": { - "type": "number", - "title": "Logprob", - "description": "The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely." - }, - "bytes": { - "title": "Bytes", - "description": "A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.", - "items": { - "type": "integer" - }, - "type": "array", - "nullable": true - }, - "top_logprobs": { - "items": { - "$ref": "#/components/schemas/TopLogprob" - }, - "type": "array", - "title": "Top Logprobs", - "description": "List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned." 
- } - }, - "type": "object", - "required": [ - "token", - "logprob", - "bytes", - "top_logprobs" - ], - "title": "ChatCompletionTokenLogprob" - }, - "ChatCompletionTool": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of the tool. Currently, only `function` is supported.", - "enum": [ - "function" - ] - }, - "function": { - "$ref": "#/components/schemas/FunctionObject" - } - }, - "type": "object", - "required": [ - "type", - "function" - ], - "title": "ChatCompletionTool" - }, - "ChatCompletionToolChoiceOption": { - "anyOf": [ - { - "type": "string", - "enum": [ - "none", - "auto", - "required" - ] - }, - { - "$ref": "#/components/schemas/ChatCompletionNamedToolChoice" - } - ], - "title": "ChatCompletionToolChoiceOption", - "description": "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. `auto` is the default if tools are present.\n" - }, - "ChatCompletionV2Request": { - "properties": { - "best_of": { - "title": "Best Of", - "description": "Number of output sequences that are generated from the prompt.\n From these `best_of` sequences, the top `n` sequences are returned.\n `best_of` must be greater than or equal to `n`. This is treated as\n the beam width when `use_beam_search` is True. By default, `best_of`\n is set to `n`.", - "type": "integer", - "nullable": true - }, - "top_k": { - "title": "Top K", - "description": "Controls the number of top tokens to consider. 
-1 means consider all tokens.", - "type": "integer", - "minimum": -1.0, - "nullable": true - }, - "min_p": { - "title": "Min P", - "description": "Float that represents the minimum probability for a token to be\n considered, relative to the probability of the most likely token.\n Must be in [0, 1]. Set to 0 to disable this.", - "type": "number", - "nullable": true - }, - "use_beam_search": { - "title": "Use Beam Search", - "description": "Whether to use beam search for sampling.", - "type": "boolean", - "nullable": true - }, - "length_penalty": { - "title": "Length Penalty", - "description": "Float that penalizes sequences based on their length.\n Used in beam search.", - "type": "number", - "nullable": true - }, - "repetition_penalty": { - "title": "Repetition Penalty", - "description": "Float that penalizes new tokens based on whether\n they appear in the prompt and the generated text so far. Values > 1\n encourage the model to use new tokens, while values < 1 encourage\n the model to repeat tokens.", - "type": "number", - "nullable": true - }, - "early_stopping": { - "title": "Early Stopping", - "description": "Controls the stopping condition for beam search. It\n accepts the following values: `True`, where the generation stops as\n soon as there are `best_of` complete candidates; `False`, where an\n heuristic is applied and the generation stops when is it very\n unlikely to find better candidates; `\"never\"`, where the beam search\n procedure only stops when there cannot be better candidates\n (canonical beam search algorithm).", - "type": "boolean", - "nullable": true - }, - "stop_token_ids": { - "title": "Stop Token Ids", - "description": "List of tokens that stop the generation when they are\n generated. 
The returned output will contain the stop tokens unless\n the stop tokens are special tokens.", - "items": { - "type": "integer" - }, - "type": "array", - "nullable": true - }, - "include_stop_str_in_output": { - "title": "Include Stop Str In Output", - "description": "Whether to include the stop strings in\n output text. Defaults to False.", - "type": "boolean", - "nullable": true - }, - "ignore_eos": { - "title": "Ignore Eos", - "description": "Whether to ignore the EOS token and continue generating\n tokens after the EOS token is generated.", - "type": "boolean", - "nullable": true - }, - "min_tokens": { - "title": "Min Tokens", - "description": "Minimum number of tokens to generate per output sequence\n before EOS or stop_token_ids can be generated", - "type": "integer", - "nullable": true - }, - "skip_special_tokens": { - "title": "Skip Special Tokens", - "description": "Whether to skip special tokens in the output. Only supported in vllm.", - "default": true, - "type": "boolean", - "nullable": true - }, - "spaces_between_special_tokens": { - "title": "Spaces Between Special Tokens", - "description": "Whether to add spaces between special tokens in the output. Only supported in vllm.", - "default": true, - "type": "boolean", - "nullable": true - }, - "echo": { - "title": "Echo", - "description": "If true, the new message will be prepended with the last message if they belong to the same role.", - "type": "boolean", - "nullable": true - }, - "add_generation_prompt": { - "title": "Add Generation Prompt", - "description": "If true, the generation prompt will be added to the chat template. This is a parameter used by chat template in tokenizer config of the model.", - "type": "boolean", - "nullable": true - }, - "continue_final_message": { - "title": "Continue Final Message", - "description": "If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. 
The model will continue this message rather than starting a new one. This allows you to \"prefill\" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`.", - "type": "boolean", - "nullable": true - }, - "add_special_tokens": { - "title": "Add Special Tokens", - "description": "If true, special tokens (e.g. BOS) will be added to the prompt on top of what is added by the chat template. For most models, the chat template takes care of adding the special tokens so this should be set to false (as is the default).", - "type": "boolean", - "nullable": true - }, - "documents": { - "title": "Documents", - "description": "A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing \"title\" and \"text\" keys.", - "items": { - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - "type": "array", - "nullable": true - }, - "chat_template": { - "title": "Chat Template", - "description": "A Jinja template to use for this conversion. As of transformers v4.44, default chat template is no longer allowed, so you must provide a chat template if the model's tokenizer does not define one and no override template is given", - "type": "string", - "nullable": true - }, - "chat_template_kwargs": { - "title": "Chat Template Kwargs", - "description": "Additional kwargs to pass to the template renderer. Will be accessible by the chat template.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "guided_json": { - "title": "Guided Json", - "description": "JSON schema for guided decoding. Only supported in vllm.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "guided_regex": { - "title": "Guided Regex", - "description": "Regex for guided decoding. 
Only supported in vllm.", - "type": "string", - "nullable": true - }, - "guided_choice": { - "title": "Guided Choice", - "description": "Choices for guided decoding. Only supported in vllm.", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "guided_grammar": { - "title": "Guided Grammar", - "description": "Context-free grammar for guided decoding. Only supported in vllm.", - "type": "string", - "nullable": true - }, - "guided_decoding_backend": { - "title": "Guided Decoding Backend", - "description": "If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer'", - "type": "string", - "nullable": true - }, - "guided_whitespace_pattern": { - "title": "Guided Whitespace Pattern", - "description": "If specified, will override the default whitespace pattern for guided json decoding.", - "type": "string", - "nullable": true - }, - "priority": { - "title": "Priority", - "description": "The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.", - "type": "integer", - "nullable": true - }, - "metadata": { - "$ref": "#/components/schemas/Metadata", - "nullable": true - }, - "temperature": { - "title": "Temperature", - "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.\n", - "default": 1, - "type": "number", - "maximum": 2.0, - "minimum": 0.0, - "nullable": true, - "example": 1 - }, - "top_p": { - "title": "Top P", - "description": "An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. 
So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both.\n", - "default": 1, - "type": "number", - "maximum": 1.0, - "minimum": 0.0, - "nullable": true, - "example": 1 - }, - "user": { - "title": "User", - "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n", - "type": "string", - "nullable": true, - "example": "user-1234" - }, - "service_tier": { - "$ref": "#/components/schemas/ServiceTier", - "nullable": true - }, - "messages": { - "items": { - "$ref": "#/components/schemas/ChatCompletionRequestMessage" - }, - "type": "array", - "minItems": 1, - "title": "Messages", - "description": "A list of messages comprising the conversation so far. Depending on the\n[model](/docs/models) you use, different message types (modalities) are\nsupported, like [text](/docs/guides/text-generation),\n[images](/docs/guides/vision), and [audio](/docs/guides/audio).\n" - }, - "model": { - "type": "string", - "title": "Model", - "description": "ID of the model to use.", - "example": "mixtral-8x7b-instruct" - }, - "modalities": { - "$ref": "#/components/schemas/ResponseModalities", - "nullable": true - }, - "reasoning_effort": { - "$ref": "#/components/schemas/ReasoningEffort", - "nullable": true - }, - "max_completion_tokens": { - "title": "Max Completion Tokens", - "description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning).\n", - "type": "integer", - "nullable": true - }, - "frequency_penalty": { - "title": "Frequency Penalty", - "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on\ntheir existing frequency in the text so far, decreasing the model's\nlikelihood to repeat the same line verbatim.\n", - "default": 0, - "type": "number", - "maximum": 2.0, - "minimum": -2.0, - "nullable": true - }, - "presence_penalty": { - "title": "Presence Penalty", - "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on\nwhether they appear in the text so far, increasing the model's likelihood\nto talk about new topics.\n", - "default": 0, - "type": "number", - "maximum": 2.0, - "minimum": -2.0, - "nullable": true - }, - "web_search_options": { - "title": "Web search", - "description": "This tool searches the web for relevant results to use in a response.\nLearn more about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).\n", - "$ref": "#/components/schemas/WebSearchOptions", - "nullable": true - }, - "top_logprobs": { - "title": "Top Logprobs", - "description": "An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.\n", - "type": "integer", - "maximum": 20.0, - "minimum": 0.0, - "nullable": true - }, - "response_format": { - "anyOf": [ - { - "$ref": "#/components/schemas/ResponseFormatText" - }, - { - "$ref": "#/components/schemas/ResponseFormatJsonSchema" - }, - { - "$ref": "#/components/schemas/ResponseFormatJsonObject" - } - ], - "title": "Response Format", - "description": "An object specifying the format that the model must output.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema. Learn more in the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. 
Using `json_schema`\nis preferred for models that support it.\n", - "nullable": true - }, - "audio": { - "description": "Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`. [Learn more](/docs/guides/audio).\n", - "$ref": "#/components/schemas/Audio2", - "nullable": true - }, - "store": { - "title": "Store", - "description": "Whether or not to store the output of this chat completion request for \nuse in our [model distillation](/docs/guides/distillation) or\n[evals](/docs/guides/evals) products.\n", - "default": false, - "type": "boolean", - "nullable": true - }, - "stream": { - "title": "Stream", - "description": "If set, partial message deltas will be sent. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n", - "default": false, - "type": "boolean", - "nullable": true - }, - "stop": { - "$ref": "#/components/schemas/StopConfiguration", - "nullable": true - }, - "logit_bias": { - "title": "Logit Bias", - "description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the\ntokenizer) to an associated bias value from -100 to 100. 
Mathematically,\nthe bias is added to the logits generated by the model prior to sampling.\nThe exact effect will vary per model, but values between -1 and 1 should\ndecrease or increase likelihood of selection; values like -100 or 100\nshould result in a ban or exclusive selection of the relevant token.\n", - "additionalProperties": { - "type": "integer" - }, - "type": "object", - "nullable": true - }, - "logprobs": { - "title": "Logprobs", - "description": "Whether to return log probabilities of the output tokens or not. If true,\nreturns the log probabilities of each output token returned in the\n`content` of `message`.\n", - "default": false, - "type": "boolean", - "nullable": true - }, - "max_tokens": { - "title": "Max Tokens", - "description": "The maximum number of [tokens](/tokenizer) that can be generated in the\nchat completion. This value can be used to control\n[costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with [o-series models](/docs/guides/reasoning).\n", - "type": "integer", - "nullable": true - }, - "n": { - "title": "N", - "description": "How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.", - "default": 1, - "type": "integer", - "maximum": 128.0, - "minimum": 1.0, - "nullable": true, - "example": 1 - }, - "prediction": { - "description": "Configuration for a [Predicted Output](/docs/guides/predicted-outputs),\nwhich can greatly improve response times when large parts of the model\nresponse are known ahead of time. 
This is most common when you are\nregenerating a file with only minor changes to most of the content.\n", - "$ref": "#/components/schemas/PredictionContent", - "nullable": true - }, - "seed": { - "title": "Seed", - "description": "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n", - "type": "integer", - "maximum": 9.223372036854776e+18, - "minimum": -9.223372036854776e+18, - "nullable": true - }, - "stream_options": { - "$ref": "#/components/schemas/ChatCompletionStreamOptions", - "nullable": true - }, - "tools": { - "title": "Tools", - "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. 
A max of 128 functions are supported.\n", - "items": { - "$ref": "#/components/schemas/ChatCompletionTool" - }, - "type": "array", - "nullable": true - }, - "tool_choice": { - "$ref": "#/components/schemas/ChatCompletionToolChoiceOption", - "nullable": true - }, - "parallel_tool_calls": { - "$ref": "#/components/schemas/ParallelToolCalls", - "nullable": true - }, - "function_call": { - "anyOf": [ - { - "type": "string", - "enum": [ - "none", - "auto" - ] - }, - { - "$ref": "#/components/schemas/ChatCompletionFunctionCallOption" - } - ], - "title": "Function Call", - "description": "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n\n`none` means the model will not call a function and instead generates a\nmessage.\n\n`auto` means the model can pick between generating a message or calling a\nfunction.\n\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the\nmodel to call that function.\n\n`none` is the default when no functions are present. 
`auto` is the default\nif functions are present.\n", - "nullable": true - }, - "functions": { - "title": "Functions", - "description": "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n", - "items": { - "$ref": "#/components/schemas/ChatCompletionFunctions" - }, - "type": "array", - "maxItems": 128, - "minItems": 1, - "nullable": true - } - }, - "type": "object", - "required": [ - "messages", - "model" - ], - "title": "ChatCompletionV2Request" - }, - "ChatCompletionV2StreamErrorChunk": { - "properties": { - "error": { - "$ref": "#/components/schemas/StreamError" - } - }, - "type": "object", - "required": [ - "error" - ], - "title": "ChatCompletionV2StreamErrorChunk" - }, - "Choice": { - "properties": { - "finish_reason": { - "type": "string", - "enum": [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call" - ], - "title": "Finish Reason", - "description": "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" - }, - "index": { - "type": "integer", - "title": "Index", - "description": "The index of the choice in the list of choices." 
- }, - "message": { - "$ref": "#/components/schemas/ChatCompletionResponseMessage" - }, - "logprobs": { - "description": "Log probability information for the choice.", - "$ref": "#/components/schemas/Logprobs", - "nullable": true - } - }, - "type": "object", - "required": [ - "finish_reason", - "index", - "message", - "logprobs" - ], - "title": "Choice" - }, - "Choice1": { - "properties": { - "delta": { - "$ref": "#/components/schemas/ChatCompletionStreamResponseDelta" - }, - "logprobs": { - "description": "Log probability information for the choice.", - "$ref": "#/components/schemas/Logprobs", - "nullable": true - }, - "finish_reason": { - "title": "Finish Reason", - "description": "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n", - "type": "string", - "enum": [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call" - ], - "nullable": true - }, - "index": { - "type": "integer", - "title": "Index", - "description": "The index of the choice in the list of choices." - } - }, - "type": "object", - "required": [ - "delta", - "finish_reason", - "index" - ], - "title": "Choice1" - }, - "Choice2": { - "properties": { - "finish_reason": { - "type": "string", - "enum": [ - "stop", - "length", - "content_filter" - ], - "title": "Finish Reason", - "description": "The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\nor `content_filter` if content was omitted due to a flag from our content filters.\n" - }, - "index": { - "type": "integer", - "title": "Index" - }, - "logprobs": { - "$ref": "#/components/schemas/Logprobs2", - "nullable": true - }, - "text": { - "type": "string", - "title": "Text" - } - }, - "type": "object", - "required": [ - "finish_reason", - "index", - "logprobs", - "text" - ], - "title": "Choice2" - }, - "CloneModelBundleV1Request": { - "properties": { - "original_model_bundle_id": { - "type": "string", - "title": "Original Model Bundle Id" - }, - "new_app_config": { - "title": "New App Config", - "additionalProperties": true, - "type": "object", - "nullable": true - } - }, - "type": "object", - "required": [ - "original_model_bundle_id" - ], - "title": "CloneModelBundleV1Request", - "description": "Request object for cloning a Model Bundle from another one." - }, - "CloneModelBundleV2Request": { - "properties": { - "original_model_bundle_id": { - "type": "string", - "title": "Original Model Bundle Id" - }, - "new_app_config": { - "title": "New App Config", - "additionalProperties": true, - "type": "object", - "nullable": true - } - }, - "type": "object", - "required": [ - "original_model_bundle_id" - ], - "title": "CloneModelBundleV2Request", - "description": "Request object for cloning a Model Bundle from another one." 
- }, - "CloudpickleArtifactFlavor": { - "properties": { - "requirements": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Requirements" - }, - "framework": { - "oneOf": [ - { - "$ref": "#/components/schemas/PytorchFramework" - }, - { - "$ref": "#/components/schemas/TensorflowFramework" - }, - { - "$ref": "#/components/schemas/CustomFramework" - } - ], - "title": "Framework", - "discriminator": { - "propertyName": "framework_type", - "mapping": { - "custom_base_image": "#/components/schemas/CustomFramework", - "pytorch": "#/components/schemas/PytorchFramework", - "tensorflow": "#/components/schemas/TensorflowFramework" - } - } - }, - "app_config": { - "title": "App Config", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "location": { - "type": "string", - "title": "Location" - }, - "flavor": { - "type": "string", - "title": "Flavor", - "enum": [ - "cloudpickle_artifact" - ] - }, - "load_predict_fn": { - "type": "string", - "title": "Load Predict Fn" - }, - "load_model_fn": { - "type": "string", - "title": "Load Model Fn" - } - }, - "type": "object", - "required": [ - "requirements", - "framework", - "location", - "flavor", - "load_predict_fn", - "load_model_fn" - ], - "title": "CloudpickleArtifactFlavor", - "description": "This is the entity-layer class for the Model Bundle flavor of a cloudpickle artifact." 
- }, - "CompletionOutput": { - "properties": { - "text": { - "type": "string", - "title": "Text" - }, - "num_prompt_tokens": { - "title": "Num Prompt Tokens", - "type": "integer", - "nullable": true - }, - "num_completion_tokens": { - "type": "integer", - "title": "Num Completion Tokens" - }, - "tokens": { - "title": "Tokens", - "items": { - "$ref": "#/components/schemas/TokenOutput" - }, - "type": "array", - "nullable": true - } - }, - "type": "object", - "required": [ - "text", - "num_completion_tokens" - ], - "title": "CompletionOutput", - "description": "Represents the output of a completion request to a model." - }, - "CompletionStreamOutput": { - "properties": { - "text": { - "type": "string", - "title": "Text" - }, - "finished": { - "type": "boolean", - "title": "Finished" - }, - "num_prompt_tokens": { - "title": "Num Prompt Tokens", - "type": "integer", - "nullable": true - }, - "num_completion_tokens": { - "title": "Num Completion Tokens", - "type": "integer", - "nullable": true - }, - "token": { - "$ref": "#/components/schemas/TokenOutput", - "nullable": true - } - }, - "type": "object", - "required": [ - "text", - "finished" - ], - "title": "CompletionStreamOutput" - }, - "CompletionStreamV1Request": { - "properties": { - "prompt": { - "type": "string", - "title": "Prompt" - }, - "max_new_tokens": { - "type": "integer", - "title": "Max New Tokens" - }, - "temperature": { - "type": "number", - "maximum": 1.0, - "minimum": 0.0, - "title": "Temperature" - }, - "stop_sequences": { - "title": "Stop Sequences", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "return_token_log_probs": { - "title": "Return Token Log Probs", - "default": false, - "type": "boolean", - "nullable": true - }, - "presence_penalty": { - "title": "Presence Penalty", - "type": "number", - "maximum": 2.0, - "minimum": 0.0, - "nullable": true - }, - "frequency_penalty": { - "title": "Frequency Penalty", - "type": "number", - "maximum": 2.0, - "minimum": 
0.0, - "nullable": true - }, - "top_k": { - "title": "Top K", - "type": "integer", - "minimum": -1.0, - "nullable": true - }, - "top_p": { - "title": "Top P", - "type": "number", - "maximum": 1.0, - "exclusiveMinimum": 0.0, - "nullable": true - }, - "include_stop_str_in_output": { - "title": "Include Stop Str In Output", - "type": "boolean", - "nullable": true - }, - "guided_json": { - "title": "Guided Json", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "guided_regex": { - "title": "Guided Regex", - "type": "string", - "nullable": true - }, - "guided_choice": { - "title": "Guided Choice", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "guided_grammar": { - "title": "Guided Grammar", - "type": "string", - "nullable": true - }, - "skip_special_tokens": { - "title": "Skip Special Tokens", - "default": true, - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "prompt", - "max_new_tokens", - "temperature" - ], - "title": "CompletionStreamV1Request", - "description": "Request object for a stream prompt completion task." - }, - "CompletionStreamV1Response": { - "properties": { - "request_id": { - "title": "Request Id", - "type": "string", - "nullable": true - }, - "output": { - "$ref": "#/components/schemas/CompletionStreamOutput", - "nullable": true - }, - "error": { - "$ref": "#/components/schemas/StreamError", - "nullable": true - } - }, - "type": "object", - "required": [ - "request_id" - ], - "title": "CompletionStreamV1Response", - "description": "Error of the response (if any)." 
- }, - "CompletionSyncV1Request": { - "properties": { - "prompt": { - "type": "string", - "title": "Prompt" - }, - "max_new_tokens": { - "type": "integer", - "title": "Max New Tokens" - }, - "temperature": { - "type": "number", - "maximum": 1.0, - "minimum": 0.0, - "title": "Temperature" - }, - "stop_sequences": { - "title": "Stop Sequences", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "return_token_log_probs": { - "title": "Return Token Log Probs", - "default": false, - "type": "boolean", - "nullable": true - }, - "presence_penalty": { - "title": "Presence Penalty", - "type": "number", - "maximum": 2.0, - "minimum": 0.0, - "nullable": true - }, - "frequency_penalty": { - "title": "Frequency Penalty", - "type": "number", - "maximum": 2.0, - "minimum": 0.0, - "nullable": true - }, - "top_k": { - "title": "Top K", - "type": "integer", - "minimum": -1.0, - "nullable": true - }, - "top_p": { - "title": "Top P", - "type": "number", - "maximum": 1.0, - "exclusiveMinimum": 0.0, - "nullable": true - }, - "include_stop_str_in_output": { - "title": "Include Stop Str In Output", - "type": "boolean", - "nullable": true - }, - "guided_json": { - "title": "Guided Json", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "guided_regex": { - "title": "Guided Regex", - "type": "string", - "nullable": true - }, - "guided_choice": { - "title": "Guided Choice", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "guided_grammar": { - "title": "Guided Grammar", - "type": "string", - "nullable": true - }, - "skip_special_tokens": { - "title": "Skip Special Tokens", - "default": true, - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "prompt", - "max_new_tokens", - "temperature" - ], - "title": "CompletionSyncV1Request", - "description": "Request object for a synchronous prompt completion task." 
- }, - "CompletionSyncV1Response": { - "properties": { - "request_id": { - "title": "Request Id", - "type": "string", - "nullable": true - }, - "output": { - "$ref": "#/components/schemas/CompletionOutput", - "nullable": true - } - }, - "type": "object", - "title": "CompletionSyncV1Response", - "description": "Response object for a synchronous prompt completion." - }, - "CompletionTokensDetails": { - "properties": { - "accepted_prediction_tokens": { - "type": "integer", - "title": "Accepted Prediction Tokens", - "description": "When using Predicted Outputs, the number of tokens in the\nprediction that appeared in the completion.\n", - "default": 0 - }, - "audio_tokens": { - "type": "integer", - "title": "Audio Tokens", - "description": "Audio input tokens generated by the model.", - "default": 0 - }, - "reasoning_tokens": { - "type": "integer", - "title": "Reasoning Tokens", - "description": "Tokens generated by the model for reasoning.", - "default": 0 - }, - "rejected_prediction_tokens": { - "type": "integer", - "title": "Rejected Prediction Tokens", - "description": "When using Predicted Outputs, the number of tokens in the\nprediction that did not appear in the completion. However, like\nreasoning tokens, these tokens are still counted in the total\ncompletion tokens for purposes of billing, output, and context window\nlimits.\n", - "default": 0 - } - }, - "type": "object", - "title": "CompletionTokensDetails" - }, - "CompletionUsage": { - "properties": { - "completion_tokens": { - "type": "integer", - "title": "Completion Tokens", - "description": "Number of tokens in the generated completion." - }, - "prompt_tokens": { - "type": "integer", - "title": "Prompt Tokens", - "description": "Number of tokens in the prompt." - }, - "total_tokens": { - "type": "integer", - "title": "Total Tokens", - "description": "Total number of tokens used in the request (prompt + completion)." 
- }, - "completion_tokens_details": { - "description": "Breakdown of tokens used in a completion.", - "$ref": "#/components/schemas/CompletionTokensDetails", - "nullable": true - }, - "prompt_tokens_details": { - "description": "Breakdown of tokens used in the prompt.", - "$ref": "#/components/schemas/PromptTokensDetails", - "nullable": true - } - }, - "type": "object", - "required": [ - "completion_tokens", - "prompt_tokens", - "total_tokens" - ], - "title": "CompletionUsage" - }, - "CompletionV2Request": { - "properties": { - "best_of": { - "title": "Best Of", - "description": "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return \u2013 `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n", - "default": 1, - "type": "integer", - "maximum": 20.0, - "minimum": 0.0, - "nullable": true - }, - "top_k": { - "title": "Top K", - "description": "Controls the number of top tokens to consider. -1 means consider all tokens.", - "type": "integer", - "minimum": -1.0, - "nullable": true - }, - "min_p": { - "title": "Min P", - "description": "Float that represents the minimum probability for a token to be\n considered, relative to the probability of the most likely token.\n Must be in [0, 1]. 
Set to 0 to disable this.", - "type": "number", - "nullable": true - }, - "use_beam_search": { - "title": "Use Beam Search", - "description": "Whether to use beam search for sampling.", - "type": "boolean", - "nullable": true - }, - "length_penalty": { - "title": "Length Penalty", - "description": "Float that penalizes sequences based on their length.\n Used in beam search.", - "type": "number", - "nullable": true - }, - "repetition_penalty": { - "title": "Repetition Penalty", - "description": "Float that penalizes new tokens based on whether\n they appear in the prompt and the generated text so far. Values > 1\n encourage the model to use new tokens, while values < 1 encourage\n the model to repeat tokens.", - "type": "number", - "nullable": true - }, - "early_stopping": { - "title": "Early Stopping", - "description": "Controls the stopping condition for beam search. It\n accepts the following values: `True`, where the generation stops as\n soon as there are `best_of` complete candidates; `False`, where an\n heuristic is applied and the generation stops when is it very\n unlikely to find better candidates; `\"never\"`, where the beam search\n procedure only stops when there cannot be better candidates\n (canonical beam search algorithm).", - "type": "boolean", - "nullable": true - }, - "stop_token_ids": { - "title": "Stop Token Ids", - "description": "List of tokens that stop the generation when they are\n generated. 
The returned output will contain the stop tokens unless\n the stop tokens are special tokens.", - "items": { - "type": "integer" - }, - "type": "array", - "nullable": true - }, - "include_stop_str_in_output": { - "title": "Include Stop Str In Output", - "description": "Whether to include the stop strings in output text.", - "type": "boolean", - "nullable": true - }, - "ignore_eos": { - "title": "Ignore Eos", - "description": "Whether to ignore the EOS token and continue generating\n tokens after the EOS token is generated.", - "type": "boolean", - "nullable": true - }, - "min_tokens": { - "title": "Min Tokens", - "description": "Minimum number of tokens to generate per output sequence\n before EOS or stop_token_ids can be generated", - "type": "integer", - "nullable": true - }, - "skip_special_tokens": { - "title": "Skip Special Tokens", - "description": "Whether to skip special tokens in the output. Only supported in vllm.", - "default": true, - "type": "boolean", - "nullable": true - }, - "spaces_between_special_tokens": { - "title": "Spaces Between Special Tokens", - "description": "Whether to add spaces between special tokens in the output. Only supported in vllm.", - "default": true, - "type": "boolean", - "nullable": true - }, - "add_special_tokens": { - "title": "Add Special Tokens", - "description": "If true (the default), special tokens (e.g. BOS) will be added to the prompt.", - "type": "boolean", - "nullable": true - }, - "response_format": { - "anyOf": [ - { - "$ref": "#/components/schemas/ResponseFormatText" - }, - { - "$ref": "#/components/schemas/ResponseFormatJsonSchema" - }, - { - "$ref": "#/components/schemas/ResponseFormatJsonObject" - } - ], - "title": "Response Format", - "description": "Similar to chat completion, this parameter specifies the format of output. Only {'type': 'json_object'} or {'type': 'text' } is supported.", - "nullable": true - }, - "guided_json": { - "title": "Guided Json", - "description": "JSON schema for guided decoding. 
Only supported in vllm.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "guided_regex": { - "title": "Guided Regex", - "description": "Regex for guided decoding. Only supported in vllm.", - "type": "string", - "nullable": true - }, - "guided_choice": { - "title": "Guided Choice", - "description": "Choices for guided decoding. Only supported in vllm.", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "guided_grammar": { - "title": "Guided Grammar", - "description": "Context-free grammar for guided decoding. Only supported in vllm.", - "type": "string", - "nullable": true - }, - "guided_decoding_backend": { - "title": "Guided Decoding Backend", - "description": "If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer'", - "type": "string", - "nullable": true - }, - "guided_whitespace_pattern": { - "title": "Guided Whitespace Pattern", - "description": "If specified, will override the default whitespace pattern for guided json decoding.", - "type": "string", - "nullable": true - }, - "model": { - "type": "string", - "title": "Model", - "description": "ID of the model to use.", - "example": "mixtral-8x7b-instruct" - }, - "prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "$ref": "#/components/schemas/Prompt" - }, - { - "$ref": "#/components/schemas/Prompt1" - } - ], - "title": "Prompt", - "description": "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n", - "nullable": true - }, - "echo": { - "title": "Echo", - "description": "Echo back the prompt in addition to the 
completion\n", - "default": false, - "type": "boolean", - "nullable": true - }, - "frequency_penalty": { - "title": "Frequency Penalty", - "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n", - "default": 0, - "type": "number", - "maximum": 2.0, - "minimum": -2.0, - "nullable": true - }, - "logit_bias": { - "title": "Logit Bias", - "description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n", - "additionalProperties": { - "type": "integer" - }, - "type": "object", - "nullable": true - }, - "logprobs": { - "title": "Logprobs", - "description": "Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n", - "type": "integer", - "maximum": 5.0, - "minimum": 0.0, - "nullable": true - }, - "max_tokens": { - "title": "Max Tokens", - "description": "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n", - "default": 16, - "type": "integer", - "minimum": 0.0, - "nullable": true, - "example": 16 - }, - "n": { - "title": "N", - "description": "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n", - "default": 1, - "type": "integer", - "maximum": 128.0, - "minimum": 1.0, - "nullable": true, - "example": 1 - }, - "presence_penalty": { - "title": "Presence Penalty", - "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n", - "default": 0, - "type": "number", - "maximum": 2.0, - "minimum": -2.0, - "nullable": true - }, - "seed": { - "title": "Seed", - "description": "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n", - "type": "integer", - "nullable": true - }, - "stop": { - "$ref": "#/components/schemas/StopConfiguration", - "nullable": true - }, - "stream": { - "title": "Stream", - "description": "If set, partial message deltas will be sent. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n", - "default": false, - "type": "boolean", - "nullable": true - }, - "stream_options": { - "$ref": "#/components/schemas/ChatCompletionStreamOptions", - "nullable": true - }, - "suffix": { - "title": "Suffix", - "description": "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n", - "type": "string", - "nullable": true, - "example": "test." - }, - "temperature": { - "title": "Temperature", - "description": "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n", - "default": 1, - "type": "number", - "maximum": 2.0, - "minimum": 0.0, - "nullable": true, - "example": 1 - }, - "top_p": { - "title": "Top P", - "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n", - "default": 1, - "type": "number", - "maximum": 1.0, - "minimum": 0.0, - "nullable": true, - "example": 1 - }, - "user": { - "title": "User", - "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n", - "type": "string", - "nullable": true, - "example": "user-1234" - } - }, - "type": "object", - "required": [ - "model", - "prompt" - ], - "title": "CompletionV2Request" - }, - "CompletionV2StreamErrorChunk": { - "properties": { - "error": { - "$ref": "#/components/schemas/StreamError" - } - }, - "type": "object", - "required": [ - "error" - ], - "title": "CompletionV2StreamErrorChunk" - }, - "Content": { - "title": "Content", - "description": "An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.", - "items": { - "$ref": "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" - }, - "type": "array", - "minItems": 1, - "nullable": true - }, - "Content1": { - "items": { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText" - }, - "type": "array", - "minItems": 1, - "title": "Content1", - "description": "An array of content parts with a defined type. 
For developer messages, only type `text` is supported." - }, - "Content2": { - "items": { - "$ref": "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" - }, - "type": "array", - "minItems": 1, - "title": "Content2", - "description": "An array of content parts with a defined type. For system messages, only type `text` is supported." - }, - "Content3": { - "items": { - "$ref": "#/components/schemas/ChatCompletionRequestToolMessageContentPart" - }, - "type": "array", - "minItems": 1, - "title": "Content3", - "description": "An array of content parts with a defined type. For tool messages, only type `text` is supported." - }, - "Content4": { - "items": { - "$ref": "#/components/schemas/ChatCompletionRequestUserMessageContentPart" - }, - "type": "array", - "minItems": 1, - "title": "Content4", - "description": "An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs." - }, - "Content8": { - "items": { - "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText" - }, - "type": "array", - "minItems": 1, - "title": "Content8", - "description": "An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs." 
- }, - "CreateAsyncTaskV1Response": { - "properties": { - "task_id": { - "type": "string", - "title": "Task Id" - } - }, - "type": "object", - "required": [ - "task_id" - ], - "title": "CreateAsyncTaskV1Response" - }, - "CreateBatchCompletionsV1ModelConfig": { - "properties": { - "max_model_len": { - "title": "Max Model Len", - "description": "Model context length, If unspecified, will be automatically derived from the model config", - "type": "integer", - "nullable": true - }, - "max_num_seqs": { - "title": "Max Num Seqs", - "description": "Maximum number of sequences per iteration", - "type": "integer", - "nullable": true - }, - "enforce_eager": { - "title": "Enforce Eager", - "description": "Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal perforamnce and flexibility", - "type": "boolean", - "nullable": true - }, - "trust_remote_code": { - "title": "Trust Remote Code", - "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False.", - "default": false, - "type": "boolean", - "nullable": true - }, - "pipeline_parallel_size": { - "title": "Pipeline Parallel Size", - "description": "Number of pipeline stages. Default to None.", - "type": "integer", - "nullable": true - }, - "tensor_parallel_size": { - "title": "Tensor Parallel Size", - "description": "Number of tensor parallel replicas. Default to None.", - "type": "integer", - "nullable": true - }, - "quantization": { - "title": "Quantization", - "description": "Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. 
If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.", - "type": "string", - "nullable": true - }, - "disable_log_requests": { - "title": "Disable Log Requests", - "description": "Disable logging requests. Default to None.", - "type": "boolean", - "nullable": true - }, - "chat_template": { - "title": "Chat Template", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "tool_call_parser": { - "title": "Tool Call Parser", - "description": "Tool call parser", - "type": "string", - "nullable": true - }, - "enable_auto_tool_choice": { - "title": "Enable Auto Tool Choice", - "description": "Enable auto tool choice", - "type": "boolean", - "nullable": true - }, - "load_format": { - "title": "Load Format", - "description": "The format of the model weights to load.\n\n* \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.\n* \"pt\" will load the weights in the pytorch bin format.\n* \"safetensors\" will load the weights in the safetensors format.\n* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading.\n* \"dummy\" will initialize the weights with random values, which is mainly for profiling.\n* \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information.\n* \"bitsandbytes\" will load the weights using bitsandbytes quantization.\n", - "type": "string", - "nullable": true - }, - "config_format": { - "title": "Config Format", - "description": "The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'.", - "type": "string", - "nullable": true - }, - "tokenizer_mode": { - "title": "Tokenizer Mode", - "description": "Tokenizer mode. 
'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`.", - "type": "string", - "nullable": true - }, - "limit_mm_per_prompt": { - "title": "Limit Mm Per Prompt", - "description": "Maximum number of data instances per modality per prompt. Only applicable for multimodal models.", - "type": "string", - "nullable": true - }, - "max_num_batched_tokens": { - "title": "Max Num Batched Tokens", - "description": "Maximum number of batched tokens per iteration", - "type": "integer", - "nullable": true - }, - "tokenizer": { - "title": "Tokenizer", - "description": "Name or path of the huggingface tokenizer to use.", - "type": "string", - "nullable": true - }, - "dtype": { - "title": "Dtype", - "description": "Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.", - "type": "string", - "nullable": true - }, - "seed": { - "title": "Seed", - "description": "Random seed for the model.", - "type": "integer", - "nullable": true - }, - "revision": { - "title": "Revision", - "description": "The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "code_revision": { - "title": "Code Revision", - "description": "The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "rope_scaling": { - "title": "Rope Scaling", - "description": "Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "tokenizer_revision": { - "title": "Tokenizer Revision", - "description": "The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "quantization_param_path": { - "title": "Quantization Param Path", - "description": "Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm.", - "type": "string", - "nullable": true - }, - "max_seq_len_to_capture": { - "title": "Max Seq Len To Capture", - "description": "Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode.", - "type": "integer", - "nullable": true - }, - "disable_sliding_window": { - "title": "Disable Sliding Window", - "description": "Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored.", - "type": "boolean", - "nullable": true - }, - "skip_tokenizer_init": { - "title": "Skip Tokenizer Init", - "description": "If true, skip initialization of tokenizer and detokenizer.", - "type": "boolean", - "nullable": true - }, - "served_model_name": { - "title": "Served Model Name", - "description": "The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. 
If not specified, the model name will be the same as `model`.", - "type": "string", - "nullable": true - }, - "override_neuron_config": { - "title": "Override Neuron Config", - "description": "Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "mm_processor_kwargs": { - "title": "Mm Processor Kwargs", - "description": "Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "block_size": { - "title": "Block Size", - "description": "Size of a cache block in number of tokens.", - "type": "integer", - "nullable": true - }, - "gpu_memory_utilization": { - "title": "Gpu Memory Utilization", - "description": "Fraction of GPU memory to use for the vLLM execution.", - "type": "number", - "nullable": true - }, - "swap_space": { - "title": "Swap Space", - "description": "Size of the CPU swap space per GPU (in GiB).", - "type": "number", - "nullable": true - }, - "cache_dtype": { - "title": "Cache Dtype", - "description": "Data type for kv cache storage.", - "type": "string", - "nullable": true - }, - "num_gpu_blocks_override": { - "title": "Num Gpu Blocks Override", - "description": "Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. 
Does nothing if None.", - "type": "integer", - "nullable": true - }, - "enable_prefix_caching": { - "title": "Enable Prefix Caching", - "description": "Enables automatic prefix caching.", - "type": "boolean", - "nullable": true - }, - "model": { - "type": "string", - "title": "Model", - "description": "ID of the model to use.", - "example": "mixtral-8x7b-instruct" - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "description": "Path to the checkpoint to load the model from.", - "type": "string", - "nullable": true - }, - "num_shards": { - "title": "Num Shards", - "description": "\nSuggested number of shards to distribute the model. When not specified, will infer the number of shards based on model config.\nSystem may decide to use a different number than the given value.\n", - "default": 1, - "type": "integer", - "minimum": 1.0, - "nullable": true - }, - "max_context_length": { - "title": "Max Context Length", - "description": "Maximum context length to use for the model. Defaults to the max allowed by the model. Deprecated in favor of max_model_len.", - "type": "integer", - "minimum": 1.0, - "nullable": true - }, - "response_role": { - "title": "Response Role", - "description": "Role of the response in the conversation. Only supported in chat completions.", - "type": "string", - "nullable": true - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels", - "description": "Labels to attach to the batch inference job.", - "default": {} - } - }, - "type": "object", - "required": [ - "model" - ], - "title": "CreateBatchCompletionsV1ModelConfig" - }, - "CreateBatchCompletionsV1Request": { - "properties": { - "input_data_path": { - "title": "Input Data Path", - "description": "Path to the input file. 
The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].", - "type": "string", - "nullable": true - }, - "output_data_path": { - "type": "string", - "title": "Output Data Path", - "description": "Path to the output file. The output file will be a JSON file of type List[CompletionOutput]." - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels", - "description": "Labels to attach to the batch inference job.", - "default": {} - }, - "data_parallelism": { - "title": "Data Parallelism", - "description": "Number of replicas to run the batch inference. More replicas are slower to schedule but faster to inference.", - "default": 1, - "type": "integer", - "maximum": 64.0, - "minimum": 1.0, - "nullable": true - }, - "max_runtime_sec": { - "title": "Max Runtime Sec", - "description": "Maximum runtime of the batch inference in seconds. Default to one day.", - "default": 86400, - "type": "integer", - "maximum": 172800.0, - "minimum": 1.0, - "nullable": true - }, - "priority": { - "title": "Priority", - "description": "Priority of the batch inference job. 
Default to None.", - "type": "string", - "nullable": true - }, - "tool_config": { - "description": "\nConfiguration for tool use.\nNOTE: this config is highly experimental and signature will change significantly in future iterations.", - "$ref": "#/components/schemas/ToolConfig", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "description": "CPUs to use for the batch inference.", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "description": "Number of GPUs to use for the batch inference.", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "description": "Amount of memory to use for the batch inference.", - "nullable": true - }, - "gpu_type": { - "description": "GPU type to use for the batch inference.", - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "description": "Storage to use for the batch inference.", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "description": "Number of nodes per worker for the batch inference.", - "type": "integer", - "nullable": true - }, - "content": { - "$ref": "#/components/schemas/CreateBatchCompletionsV1RequestContent", - "nullable": true - }, - "model_config": { - "$ref": "#/components/schemas/CreateBatchCompletionsV1ModelConfig" - } - }, - "type": "object", - "required": [ - "output_data_path", - "model_config" - ], - "title": "CreateBatchCompletionsV1Request", - "description": "Request object for batch completions." 
- }, - "CreateBatchCompletionsV1RequestContent": { - "properties": { - "prompts": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Prompts" - }, - "max_new_tokens": { - "type": "integer", - "title": "Max New Tokens" - }, - "temperature": { - "type": "number", - "maximum": 1.0, - "minimum": 0.0, - "title": "Temperature" - }, - "stop_sequences": { - "title": "Stop Sequences", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "return_token_log_probs": { - "title": "Return Token Log Probs", - "default": false, - "type": "boolean", - "nullable": true - }, - "presence_penalty": { - "title": "Presence Penalty", - "type": "number", - "maximum": 2.0, - "minimum": 0.0, - "nullable": true - }, - "frequency_penalty": { - "title": "Frequency Penalty", - "type": "number", - "maximum": 2.0, - "minimum": 0.0, - "nullable": true - }, - "top_k": { - "title": "Top K", - "type": "integer", - "minimum": -1.0, - "nullable": true - }, - "top_p": { - "title": "Top P", - "type": "number", - "maximum": 1.0, - "exclusiveMinimum": 0.0, - "nullable": true - }, - "skip_special_tokens": { - "title": "Skip Special Tokens", - "default": true, - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "prompts", - "max_new_tokens", - "temperature" - ], - "title": "CreateBatchCompletionsV1RequestContent" - }, - "CreateBatchCompletionsV1Response": { - "properties": { - "job_id": { - "type": "string", - "title": "Job Id" - } - }, - "type": "object", - "required": [ - "job_id" - ], - "title": "CreateBatchCompletionsV1Response" - }, - "CreateBatchCompletionsV2Request": { - "properties": { - "input_data_path": { - "title": "Input Data Path", - "description": "Path to the input file. 
The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].", - "type": "string", - "nullable": true - }, - "output_data_path": { - "type": "string", - "title": "Output Data Path", - "description": "Path to the output file. The output file will be a JSON file of type List[CompletionOutput]." - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels", - "description": "Labels to attach to the batch inference job.", - "default": {} - }, - "data_parallelism": { - "title": "Data Parallelism", - "description": "Number of replicas to run the batch inference. More replicas are slower to schedule but faster to inference.", - "default": 1, - "type": "integer", - "maximum": 64.0, - "minimum": 1.0, - "nullable": true - }, - "max_runtime_sec": { - "title": "Max Runtime Sec", - "description": "Maximum runtime of the batch inference in seconds. Default to one day.", - "default": 86400, - "type": "integer", - "maximum": 172800.0, - "minimum": 1.0, - "nullable": true - }, - "priority": { - "title": "Priority", - "description": "Priority of the batch inference job. 
Default to None.", - "type": "string", - "nullable": true - }, - "tool_config": { - "description": "\nConfiguration for tool use.\nNOTE: this config is highly experimental and signature will change significantly in future iterations.", - "$ref": "#/components/schemas/ToolConfig", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "description": "CPUs to use for the batch inference.", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "description": "Number of GPUs to use for the batch inference.", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "description": "Amount of memory to use for the batch inference.", - "nullable": true - }, - "gpu_type": { - "description": "GPU type to use for the batch inference.", - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "description": "Storage to use for the batch inference.", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "description": "Number of nodes per worker for the batch inference.", - "type": "integer", - "nullable": true - }, - "content": { - "anyOf": [ - { - "$ref": "#/components/schemas/CreateBatchCompletionsV1RequestContent" - }, - { - "items": { - "$ref": "#/components/schemas/FilteredCompletionV2Request" - }, - "type": "array" - }, - { - "items": { - "$ref": "#/components/schemas/FilteredChatCompletionV2Request" - }, - "type": "array" - } - ], - "title": "Content", - "description": "\nEither `input_data_path` or `content` needs to be provided.\nWhen input_data_path is provided, the input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].\n", - 
"nullable": true - }, - "model_config": { - "$ref": "#/components/schemas/BatchCompletionsModelConfig", - "description": "Model configuration for the batch inference. Hardware configurations are inferred." - } - }, - "type": "object", - "required": [ - "output_data_path", - "model_config" - ], - "title": "CreateBatchCompletionsV2Request", - "description": "Request object for batch completions." - }, - "CreateBatchJobResourceRequests": { - "properties": { - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "max_workers": { - "title": "Max Workers", - "type": "integer", - "nullable": true - }, - "per_worker": { - "title": "Per Worker", - "type": "integer", - "nullable": true - }, - "concurrent_requests_per_worker": { - "title": "Concurrent Requests Per Worker", - "type": "integer", - "nullable": true - } - }, - "type": "object", - "title": "CreateBatchJobResourceRequests" - }, - "CreateBatchJobV1Request": { - "properties": { - "model_bundle_id": { - "type": "string", - "title": "Model Bundle Id" - }, - "input_path": { - "type": "string", - "title": "Input Path" - }, - "serialization_format": { - "$ref": "#/components/schemas/BatchJobSerializationFormat" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels" - }, - "resource_requests": { - "$ref": "#/components/schemas/CreateBatchJobResourceRequests" - }, - "timeout_seconds": { - 
"type": "number", - "title": "Timeout Seconds", - "default": 43200.0 - } - }, - "type": "object", - "required": [ - "model_bundle_id", - "input_path", - "serialization_format", - "labels", - "resource_requests" - ], - "title": "CreateBatchJobV1Request" - }, - "CreateBatchJobV1Response": { - "properties": { - "job_id": { - "type": "string", - "title": "Job Id" - } - }, - "type": "object", - "required": [ - "job_id" - ], - "title": "CreateBatchJobV1Response" - }, - "CreateChatCompletionResponse": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "A unique identifier for the chat completion." - }, - "choices": { - "items": { - "$ref": "#/components/schemas/Choice" - }, - "type": "array", - "title": "Choices", - "description": "A list of chat completion choices. Can be more than one if `n` is greater than 1." - }, - "created": { - "type": "integer", - "title": "Created", - "description": "The Unix timestamp (in seconds) of when the chat completion was created." - }, - "model": { - "type": "string", - "title": "Model", - "description": "The model used for the chat completion." 
- }, - "service_tier": { - "$ref": "#/components/schemas/ServiceTier", - "nullable": true - }, - "system_fingerprint": { - "title": "System Fingerprint", - "description": "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n", - "type": "string", - "nullable": true - }, - "object": { - "type": "string", - "title": "Object", - "description": "The object type, which is always `chat.completion`.", - "enum": [ - "chat.completion" - ] - }, - "usage": { - "$ref": "#/components/schemas/CompletionUsage", - "nullable": true - } - }, - "type": "object", - "required": [ - "id", - "choices", - "created", - "model", - "object" - ], - "title": "CreateChatCompletionResponse" - }, - "CreateChatCompletionStreamResponse": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "A unique identifier for the chat completion. Each chunk has the same ID." - }, - "choices": { - "items": { - "$ref": "#/components/schemas/Choice1" - }, - "type": "array", - "title": "Choices", - "description": "A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the\nlast chunk if you set `stream_options: {\"include_usage\": true}`.\n" - }, - "created": { - "type": "integer", - "title": "Created", - "description": "The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp." - }, - "model": { - "type": "string", - "title": "Model", - "description": "The model to generate the completion." 
- }, - "service_tier": { - "$ref": "#/components/schemas/ServiceTier", - "nullable": true - }, - "system_fingerprint": { - "title": "System Fingerprint", - "description": "This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n", - "type": "string", - "nullable": true - }, - "object": { - "type": "string", - "title": "Object", - "description": "The object type, which is always `chat.completion.chunk`.", - "enum": [ - "chat.completion.chunk" - ] - }, - "usage": { - "description": "An optional field that will only be present when you set\n`stream_options: {\"include_usage\": true}` in your request. When present, it\ncontains a null value **except for the last chunk** which contains the\ntoken usage statistics for the entire request.\n\n**NOTE:** If the stream is interrupted or cancelled, you may not\nreceive the final usage chunk which contains the total token usage for\nthe request.\n", - "$ref": "#/components/schemas/CompletionUsage", - "nullable": true - } - }, - "type": "object", - "required": [ - "id", - "choices", - "created", - "model", - "object" - ], - "title": "CreateChatCompletionStreamResponse" - }, - "CreateCompletionResponse": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "A unique identifier for the completion." - }, - "choices": { - "items": { - "$ref": "#/components/schemas/Choice2" - }, - "type": "array", - "title": "Choices", - "description": "The list of completion choices the model generated for the input prompt." - }, - "created": { - "type": "integer", - "title": "Created", - "description": "The Unix timestamp (in seconds) of when the completion was created." - }, - "model": { - "type": "string", - "title": "Model", - "description": "The model used for completion." 
- }, - "system_fingerprint": { - "title": "System Fingerprint", - "description": "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n", - "type": "string", - "nullable": true - }, - "object": { - "type": "string", - "title": "Object", - "description": "The object type, which is always \"text_completion\"", - "enum": [ - "text_completion" - ] - }, - "usage": { - "$ref": "#/components/schemas/CompletionUsage", - "nullable": true - } - }, - "type": "object", - "required": [ - "id", - "choices", - "created", - "model", - "object" - ], - "title": "CreateCompletionResponse" - }, - "CreateDeepSpeedModelEndpointRequest": { - "properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - 
"title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name" - }, - "model_name": { - "type": "string", - "title": "Model Name" - }, - "metadata": { - "additionalProperties": true, - "type": "object", - "title": "Metadata" - }, - "min_workers": { - "type": "integer", - "title": "Min Workers" - }, - "max_workers": { - "type": "integer", - "title": "Max Workers" - }, - "per_worker": { - "type": "integer", - "title": "Per Worker" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels" - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "default": "hugging_face" - }, - "inference_framework_image_tag": { - "type": "string", - "title": "Inference Framework Image Tag", - "default": "latest" - }, - "num_shards": { - "type": "integer", - "title": "Num Shards", - "default": 1 - }, - "endpoint_type": { 
- "$ref": "#/components/schemas/ModelEndpointType", - "default": "sync" - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "deepspeed", - "enum": [ - "deepspeed" - ] - } - }, - "type": "object", - "required": [ - "name", - "model_name", - "metadata", - "min_workers", - "max_workers", - "per_worker", - "labels" - ], - "title": "CreateDeepSpeedModelEndpointRequest" - }, - "CreateDockerImageBatchJobBundleV1Request": { - "properties": { - "name": { - "type": "string", - "title": "Name" - }, - "image_repository": { - "type": "string", - "title": "Image Repository" - }, - "image_tag": { - "type": "string", - "title": "Image Tag" - }, - "command": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Command" - }, - "env": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Env", - "default": {} - }, - "mount_location": { - "title": "Mount Location", - "type": "string", - "nullable": true - }, - "resource_requests": { - "$ref": "#/components/schemas/CreateDockerImageBatchJobResourceRequests", - "default": {} - }, - "public": { - "title": "Public", - "default": false, - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "name", - "image_repository", - "image_tag", - "command" - ], - "title": "CreateDockerImageBatchJobBundleV1Request" - }, - "CreateDockerImageBatchJobBundleV1Response": { - "properties": { - "docker_image_batch_job_bundle_id": { - "type": "string", - "title": "Docker Image Batch Job Bundle Id" - } - }, - "type": "object", - "required": [ - "docker_image_batch_job_bundle_id" - ], - "title": "CreateDockerImageBatchJobBundleV1Response" - }, - "CreateDockerImageBatchJobResourceRequests": { - "properties": { - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": 
"integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - } - }, - "type": "object", - "title": "CreateDockerImageBatchJobResourceRequests" - }, - "CreateDockerImageBatchJobV1Request": { - "properties": { - "docker_image_batch_job_bundle_name": { - "title": "Docker Image Batch Job Bundle Name", - "type": "string", - "nullable": true - }, - "docker_image_batch_job_bundle_id": { - "title": "Docker Image Batch Job Bundle Id", - "type": "string", - "nullable": true - }, - "job_config": { - "title": "Job Config", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels" - }, - "resource_requests": { - "$ref": "#/components/schemas/CreateDockerImageBatchJobResourceRequests", - "default": {} - }, - "override_job_max_runtime_s": { - "title": "Override Job Max Runtime S", - "type": "integer", - "nullable": true - } - }, - "type": "object", - "required": [ - "labels" - ], - "title": "CreateDockerImageBatchJobV1Request" - }, - "CreateDockerImageBatchJobV1Response": { - "properties": { - "job_id": { - "type": "string", - "title": "Job Id" - } - }, - "type": "object", - "required": [ - "job_id" - ], - "title": "CreateDockerImageBatchJobV1Response" - }, - "CreateFineTuneRequest": { - "properties": { - "model": { - "type": "string", - "title": "Model" - }, - "training_file": { - "type": "string", - "title": "Training File" - }, - "validation_file": { - "title": "Validation File", - "type": "string", - "nullable": 
true - }, - "hyperparameters": { - "additionalProperties": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - }, - { - "additionalProperties": true, - "type": "object" - } - ] - }, - "type": "object", - "title": "Hyperparameters" - }, - "suffix": { - "title": "Suffix", - "type": "string", - "nullable": true - }, - "wandb_config": { - "title": "Wandb Config", - "additionalProperties": true, - "type": "object", - "nullable": true - } - }, - "type": "object", - "required": [ - "model", - "training_file", - "hyperparameters" - ], - "title": "CreateFineTuneRequest" - }, - "CreateFineTuneResponse": { - "properties": { - "id": { - "type": "string", - "title": "Id" - } - }, - "type": "object", - "required": [ - "id" - ], - "title": "CreateFineTuneResponse" - }, - "CreateLLMModelEndpointV1Response": { - "properties": { - "endpoint_creation_task_id": { - "type": "string", - "title": "Endpoint Creation Task Id" - } - }, - "type": "object", - "required": [ - "endpoint_creation_task_id" - ], - "title": "CreateLLMModelEndpointV1Response" - }, - "CreateLightLLMModelEndpointRequest": { - "properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": 
"string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name" - }, - "model_name": { - "type": "string", - "title": "Model Name" - }, - "metadata": { - "additionalProperties": true, - "type": "object", - "title": "Metadata" - }, - "min_workers": { - "type": "integer", - "title": "Min Workers" - }, - "max_workers": { - "type": "integer", - "title": "Max Workers" - }, - "per_worker": { - "type": "integer", - "title": "Per Worker" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels" - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "default": "hugging_face" - }, - "inference_framework_image_tag": { - "type": "string", - "title": "Inference Framework Image Tag", - "default": "latest" - }, - "num_shards": { - "type": "integer", - "title": "Num Shards", - "default": 1 - }, - "endpoint_type": { - "$ref": "#/components/schemas/ModelEndpointType", - "default": "sync" - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "lightllm", - "enum": [ - "lightllm" - ] - } - }, - "type": "object", - "required": [ - "name", - "model_name", - "metadata", - "min_workers", - "max_workers", - "per_worker", - "labels" - ], - "title": "CreateLightLLMModelEndpointRequest" - }, - "CreateModelBundleV1Request": { - "properties": { - "name": { - "type": "string", - "title": "Name" - }, - "location": { - "type": "string", - "title": "Location" - }, - "requirements": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Requirements" - }, - "env_params": { - "$ref": "#/components/schemas/ModelBundleEnvironmentParams" - }, - "packaging_type": { - "$ref": "#/components/schemas/ModelBundlePackagingType" - }, - "metadata": { - "title": "Metadata", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - 
"app_config": { - "title": "App Config", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "schema_location": { - "title": "Schema Location", - "type": "string", - "nullable": true - } - }, - "type": "object", - "required": [ - "name", - "location", - "requirements", - "env_params", - "packaging_type" - ], - "title": "CreateModelBundleV1Request", - "description": "Request object for creating a Model Bundle." - }, - "CreateModelBundleV1Response": { - "properties": { - "model_bundle_id": { - "type": "string", - "title": "Model Bundle Id" - } - }, - "type": "object", - "required": [ - "model_bundle_id" - ], - "title": "CreateModelBundleV1Response", - "description": "Response object for creating a Model Bundle." - }, - "CreateModelBundleV2Request": { - "properties": { - "name": { - "type": "string", - "title": "Name" - }, - "metadata": { - "title": "Metadata", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "schema_location": { - "type": "string", - "title": "Schema Location" - }, - "flavor": { - "oneOf": [ - { - "$ref": "#/components/schemas/CloudpickleArtifactFlavor" - }, - { - "$ref": "#/components/schemas/ZipArtifactFlavor" - }, - { - "$ref": "#/components/schemas/RunnableImageFlavor" - }, - { - "$ref": "#/components/schemas/StreamingEnhancedRunnableImageFlavor" - }, - { - "$ref": "#/components/schemas/TritonEnhancedRunnableImageFlavor" - } - ], - "title": "Flavor", - "discriminator": { - "propertyName": "flavor", - "mapping": { - "cloudpickle_artifact": "#/components/schemas/CloudpickleArtifactFlavor", - "runnable_image": "#/components/schemas/RunnableImageFlavor", - "streaming_enhanced_runnable_image": "#/components/schemas/StreamingEnhancedRunnableImageFlavor", - "triton_enhanced_runnable_image": "#/components/schemas/TritonEnhancedRunnableImageFlavor", - "zip_artifact": "#/components/schemas/ZipArtifactFlavor" - } - } - } - }, - "type": "object", - "required": [ - "name", - "schema_location", - "flavor" 
- ], - "title": "CreateModelBundleV2Request", - "description": "Request object for creating a Model Bundle." - }, - "CreateModelBundleV2Response": { - "properties": { - "model_bundle_id": { - "type": "string", - "title": "Model Bundle Id" - } - }, - "type": "object", - "required": [ - "model_bundle_id" - ], - "title": "CreateModelBundleV2Response", - "description": "Response object for creating a Model Bundle." - }, - "CreateModelEndpointV1Request": { - "properties": { - "name": { - "type": "string", - "maxLength": 63, - "title": "Name" - }, - "model_bundle_id": { - "type": "string", - "title": "Model Bundle Id" - }, - "endpoint_type": { - "$ref": "#/components/schemas/ModelEndpointType" - }, - "metadata": { - "additionalProperties": true, - "type": "object", - "title": "Metadata" - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus" - }, - "gpus": { - "type": "integer", - "minimum": 0.0, - "title": "Gpus" - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory" - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage" - }, - "nodes_per_worker": { - "type": "integer", - "exclusiveMinimum": 0.0, - "title": "Nodes Per Worker", - "default": 1 - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "min_workers": { - "type": "integer", - "minimum": 0.0, - "title": "Min Workers" - }, - "max_workers": { - "type": "integer", - "minimum": 0.0, - "title": "Max Workers" - }, - "per_worker": { - "type": "integer", - "exclusiveMinimum": 0.0, - "title": "Per 
Worker" - }, - "concurrent_requests_per_worker": { - "title": "Concurrent Requests Per Worker", - "type": "integer", - "exclusiveMinimum": 0.0, - "nullable": true - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels" - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": false, - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "name", - "model_bundle_id", - "endpoint_type", - "metadata", - "cpus", - "gpus", - "memory", - "storage", - "min_workers", - "max_workers", - "per_worker", - "labels" - ], - "title": "CreateModelEndpointV1Request" - }, - "CreateModelEndpointV1Response": { - "properties": { - "endpoint_creation_task_id": { - "type": "string", - "title": "Endpoint Creation Task Id" - } - }, - "type": "object", - "required": [ - "endpoint_creation_task_id" - ], - "title": "CreateModelEndpointV1Response" - }, - "CreateSGLangModelEndpointRequest": { - "properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - 
"title": "Gpus", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name" - }, - "model_name": { - "type": "string", - "title": "Model Name" - }, - "metadata": { - "additionalProperties": true, - "type": "object", - "title": "Metadata" - }, - "min_workers": { - "type": "integer", - "title": "Min Workers" - }, - "max_workers": { - "type": "integer", - "title": "Max Workers" - }, - "per_worker": { - "type": "integer", - "title": "Per Worker" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels" - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "default": "hugging_face" - }, - "inference_framework_image_tag": { - "type": "string", - "title": "Inference Framework Image Tag", - "default": "latest" - }, - "num_shards": { - "type": "integer", - "title": "Num Shards", - "default": 1 - }, - "endpoint_type": { - "$ref": "#/components/schemas/ModelEndpointType", - "default": "sync" - }, - "trust_remote_code": { - "title": "Trust Remote Code", - "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). 
Default to False.", - "default": false, - "type": "boolean", - "nullable": true - }, - "tp_size": { - "title": "Tp Size", - "description": "The tensor parallel size.", - "type": "integer", - "nullable": true - }, - "skip_tokenizer_init": { - "title": "Skip Tokenizer Init", - "description": "If set, skip init tokenizer and pass input_ids in generate request", - "type": "boolean", - "nullable": true - }, - "load_format": { - "title": "Load Format", - "description": "The format of the model weights to load.", - "type": "string", - "nullable": true - }, - "dtype": { - "title": "Dtype", - "description": "Data type for model weights and activations.", - "type": "string", - "nullable": true - }, - "kv_cache_dtype": { - "title": "Kv Cache Dtype", - "description": "Data type for kv cache storage. \"auto\" will use model data type.", - "type": "string", - "nullable": true - }, - "quantization_param_path": { - "title": "Quantization Param Path", - "description": "Path to the JSON file containing the KV cache scaling factors.", - "type": "string", - "nullable": true - }, - "quantization": { - "title": "Quantization", - "description": "The quantization method.", - "type": "string", - "nullable": true - }, - "context_length": { - "title": "Context Length", - "description": "The model's maximum context length.", - "type": "integer", - "nullable": true - }, - "device": { - "title": "Device", - "description": "The device type.", - "type": "string", - "nullable": true - }, - "served_model_name": { - "title": "Served Model Name", - "description": "Override the model name returned by the v1/models endpoint in OpenAI API server.", - "type": "string", - "nullable": true - }, - "chat_template": { - "title": "Chat Template", - "description": "The builtin chat template name or path of the chat template file.", - "type": "string", - "nullable": true - }, - "is_embedding": { - "title": "Is Embedding", - "description": "Whether to use a CausalLM as an embedding model.", - "type": "boolean", - 
"nullable": true - }, - "revision": { - "title": "Revision", - "description": "The specific model version to use.", - "type": "string", - "nullable": true - }, - "mem_fraction_static": { - "title": "Mem Fraction Static", - "description": "The fraction of the memory used for static allocation.", - "type": "number", - "nullable": true - }, - "max_running_requests": { - "title": "Max Running Requests", - "description": "The maximum number of running requests.", - "type": "integer", - "nullable": true - }, - "max_total_tokens": { - "title": "Max Total Tokens", - "description": "The maximum number of tokens in the memory pool.", - "type": "integer", - "nullable": true - }, - "chunked_prefill_size": { - "title": "Chunked Prefill Size", - "description": "The maximum number of tokens in a chunk for the chunked prefill.", - "type": "integer", - "nullable": true - }, - "max_prefill_tokens": { - "title": "Max Prefill Tokens", - "description": "The maximum number of tokens in a prefill batch.", - "type": "integer", - "nullable": true - }, - "schedule_policy": { - "title": "Schedule Policy", - "description": "The scheduling policy of the requests.", - "type": "string", - "nullable": true - }, - "schedule_conservativeness": { - "title": "Schedule Conservativeness", - "description": "How conservative the schedule policy is.", - "type": "number", - "nullable": true - }, - "cpu_offload_gb": { - "title": "Cpu Offload Gb", - "description": "How many GBs of RAM to reserve for CPU offloading", - "type": "integer", - "nullable": true - }, - "prefill_only_one_req": { - "title": "Prefill Only One Req", - "description": "If true, we only prefill one request at one prefill batch", - "type": "boolean", - "nullable": true - }, - "stream_interval": { - "title": "Stream Interval", - "description": "The interval for streaming in terms of the token length.", - "type": "integer", - "nullable": true - }, - "random_seed": { - "title": "Random Seed", - "description": "The random seed.", - "type": 
"integer", - "nullable": true - }, - "constrained_json_whitespace_pattern": { - "title": "Constrained Json Whitespace Pattern", - "description": "Regex pattern for syntactic whitespaces allowed in JSON constrained output.", - "type": "string", - "nullable": true - }, - "watchdog_timeout": { - "title": "Watchdog Timeout", - "description": "Set watchdog timeout in seconds.", - "type": "number", - "nullable": true - }, - "download_dir": { - "title": "Download Dir", - "description": "Model download directory.", - "type": "string", - "nullable": true - }, - "base_gpu_id": { - "title": "Base Gpu Id", - "description": "The base GPU ID to start allocating GPUs from.", - "type": "integer", - "nullable": true - }, - "log_level": { - "title": "Log Level", - "description": "The logging level of all loggers.", - "type": "string", - "nullable": true - }, - "log_level_http": { - "title": "Log Level Http", - "description": "The logging level of HTTP server.", - "type": "string", - "nullable": true - }, - "log_requests": { - "title": "Log Requests", - "description": "Log the inputs and outputs of all requests.", - "type": "boolean", - "nullable": true - }, - "show_time_cost": { - "title": "Show Time Cost", - "description": "Show time cost of custom marks.", - "type": "boolean", - "nullable": true - }, - "enable_metrics": { - "title": "Enable Metrics", - "description": "Enable log prometheus metrics.", - "type": "boolean", - "nullable": true - }, - "decode_log_interval": { - "title": "Decode Log Interval", - "description": "The log interval of decode batch.", - "type": "integer", - "nullable": true - }, - "api_key": { - "title": "Api Key", - "description": "Set API key of the server.", - "type": "string", - "nullable": true - }, - "file_storage_pth": { - "title": "File Storage Pth", - "description": "The path of the file storage in backend.", - "type": "string", - "nullable": true - }, - "enable_cache_report": { - "title": "Enable Cache Report", - "description": "Return number of 
cached tokens in usage.prompt_tokens_details.", - "type": "boolean", - "nullable": true - }, - "data_parallel_size": { - "title": "Data Parallel Size", - "description": "The data parallelism size.", - "type": "integer", - "nullable": true - }, - "load_balance_method": { - "title": "Load Balance Method", - "description": "The load balancing strategy for data parallelism.", - "type": "string", - "nullable": true - }, - "expert_parallel_size": { - "title": "Expert Parallel Size", - "description": "The expert parallelism size.", - "type": "integer", - "nullable": true - }, - "dist_init_addr": { - "title": "Dist Init Addr", - "description": "The host address for initializing distributed backend.", - "type": "string", - "nullable": true - }, - "nnodes": { - "title": "Nnodes", - "description": "The number of nodes.", - "type": "integer", - "nullable": true - }, - "node_rank": { - "title": "Node Rank", - "description": "The node rank.", - "type": "integer", - "nullable": true - }, - "json_model_override_args": { - "title": "Json Model Override Args", - "description": "A dictionary in JSON string format used to override default model configurations.", - "type": "string", - "nullable": true - }, - "lora_paths": { - "title": "Lora Paths", - "description": "The list of LoRA adapters.", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "max_loras_per_batch": { - "title": "Max Loras Per Batch", - "description": "Maximum number of adapters for a running batch.", - "type": "integer", - "nullable": true - }, - "attention_backend": { - "title": "Attention Backend", - "description": "Choose the kernels for attention layers.", - "type": "string", - "nullable": true - }, - "sampling_backend": { - "title": "Sampling Backend", - "description": "Choose the kernels for sampling layers.", - "type": "string", - "nullable": true - }, - "grammar_backend": { - "title": "Grammar Backend", - "description": "Choose the backend for grammar-guided decoding.", - 
"type": "string", - "nullable": true - }, - "speculative_algorithm": { - "title": "Speculative Algorithm", - "description": "Speculative algorithm.", - "type": "string", - "nullable": true - }, - "speculative_draft_model_path": { - "title": "Speculative Draft Model Path", - "description": "The path of the draft model weights.", - "type": "string", - "nullable": true - }, - "speculative_num_steps": { - "title": "Speculative Num Steps", - "description": "The number of steps sampled from draft model in Speculative Decoding.", - "type": "integer", - "nullable": true - }, - "speculative_num_draft_tokens": { - "title": "Speculative Num Draft Tokens", - "description": "The number of token sampled from draft model in Speculative Decoding.", - "type": "integer", - "nullable": true - }, - "speculative_eagle_topk": { - "title": "Speculative Eagle Topk", - "description": "The number of token sampled from draft model in eagle2 each step.", - "type": "integer", - "nullable": true - }, - "enable_double_sparsity": { - "title": "Enable Double Sparsity", - "description": "Enable double sparsity attention", - "type": "boolean", - "nullable": true - }, - "ds_channel_config_path": { - "title": "Ds Channel Config Path", - "description": "The path of the double sparsity channel config", - "type": "string", - "nullable": true - }, - "ds_heavy_channel_num": { - "title": "Ds Heavy Channel Num", - "description": "The number of heavy channels in double sparsity attention", - "type": "integer", - "nullable": true - }, - "ds_heavy_token_num": { - "title": "Ds Heavy Token Num", - "description": "The number of heavy tokens in double sparsity attention", - "type": "integer", - "nullable": true - }, - "ds_heavy_channel_type": { - "title": "Ds Heavy Channel Type", - "description": "The type of heavy channels in double sparsity attention", - "type": "string", - "nullable": true - }, - "ds_sparse_decode_threshold": { - "title": "Ds Sparse Decode Threshold", - "description": "The threshold for sparse 
decoding in double sparsity attention", - "type": "integer", - "nullable": true - }, - "disable_radix_cache": { - "title": "Disable Radix Cache", - "description": "Disable RadixAttention for prefix caching.", - "type": "boolean", - "nullable": true - }, - "disable_jump_forward": { - "title": "Disable Jump Forward", - "description": "Disable jump-forward for grammar-guided decoding.", - "type": "boolean", - "nullable": true - }, - "disable_cuda_graph": { - "title": "Disable Cuda Graph", - "description": "Disable cuda graph.", - "type": "boolean", - "nullable": true - }, - "disable_cuda_graph_padding": { - "title": "Disable Cuda Graph Padding", - "description": "Disable cuda graph when padding is needed.", - "type": "boolean", - "nullable": true - }, - "disable_outlines_disk_cache": { - "title": "Disable Outlines Disk Cache", - "description": "Disable disk cache of outlines.", - "type": "boolean", - "nullable": true - }, - "disable_custom_all_reduce": { - "title": "Disable Custom All Reduce", - "description": "Disable the custom all-reduce kernel.", - "type": "boolean", - "nullable": true - }, - "disable_mla": { - "title": "Disable Mla", - "description": "Disable Multi-head Latent Attention (MLA) for DeepSeek-V2.", - "type": "boolean", - "nullable": true - }, - "disable_overlap_schedule": { - "title": "Disable Overlap Schedule", - "description": "Disable the overlap scheduler.", - "type": "boolean", - "nullable": true - }, - "enable_mixed_chunk": { - "title": "Enable Mixed Chunk", - "description": "Enable mixing prefill and decode in a batch when using chunked prefill.", - "type": "boolean", - "nullable": true - }, - "enable_dp_attention": { - "title": "Enable Dp Attention", - "description": "Enable data parallelism for attention and tensor parallelism for FFN.", - "type": "boolean", - "nullable": true - }, - "enable_ep_moe": { - "title": "Enable Ep Moe", - "description": "Enable expert parallelism for moe.", - "type": "boolean", - "nullable": true - }, - 
"enable_torch_compile": { - "title": "Enable Torch Compile", - "description": "Optimize the model with torch.compile.", - "type": "boolean", - "nullable": true - }, - "torch_compile_max_bs": { - "title": "Torch Compile Max Bs", - "description": "Set the maximum batch size when using torch compile.", - "type": "integer", - "nullable": true - }, - "cuda_graph_max_bs": { - "title": "Cuda Graph Max Bs", - "description": "Set the maximum batch size for cuda graph.", - "type": "integer", - "nullable": true - }, - "cuda_graph_bs": { - "title": "Cuda Graph Bs", - "description": "Set the list of batch sizes for cuda graph.", - "items": { - "type": "integer" - }, - "type": "array", - "nullable": true - }, - "torchao_config": { - "title": "Torchao Config", - "description": "Optimize the model with torchao.", - "type": "string", - "nullable": true - }, - "enable_nan_detection": { - "title": "Enable Nan Detection", - "description": "Enable the NaN detection for debugging purposes.", - "type": "boolean", - "nullable": true - }, - "enable_p2p_check": { - "title": "Enable P2P Check", - "description": "Enable P2P check for GPU access.", - "type": "boolean", - "nullable": true - }, - "triton_attention_reduce_in_fp32": { - "title": "Triton Attention Reduce In Fp32", - "description": "Cast the intermediate attention results to fp32.", - "type": "boolean", - "nullable": true - }, - "triton_attention_num_kv_splits": { - "title": "Triton Attention Num Kv Splits", - "description": "The number of KV splits in flash decoding Triton kernel.", - "type": "integer", - "nullable": true - }, - "num_continuous_decode_steps": { - "title": "Num Continuous Decode Steps", - "description": "Run multiple continuous decoding steps to reduce scheduling overhead.", - "type": "integer", - "nullable": true - }, - "delete_ckpt_after_loading": { - "title": "Delete Ckpt After Loading", - "description": "Delete the model checkpoint after loading the model.", - "type": "boolean", - "nullable": true - }, - 
"enable_memory_saver": { - "title": "Enable Memory Saver", - "description": "Allow saving memory using release_memory_occupation and resume_memory_occupation", - "type": "boolean", - "nullable": true - }, - "allow_auto_truncate": { - "title": "Allow Auto Truncate", - "description": "Allow automatically truncating requests that exceed the maximum input length.", - "type": "boolean", - "nullable": true - }, - "enable_custom_logit_processor": { - "title": "Enable Custom Logit Processor", - "description": "Enable users to pass custom logit processors to the server.", - "type": "boolean", - "nullable": true - }, - "tool_call_parser": { - "title": "Tool Call Parser", - "description": "Specify the parser for handling tool-call interactions.", - "type": "string", - "nullable": true - }, - "huggingface_repo": { - "title": "Huggingface Repo", - "description": "The Hugging Face repository ID.", - "type": "string", - "nullable": true - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "sglang", - "enum": [ - "sglang" - ] - } - }, - "type": "object", - "required": [ - "name", - "model_name", - "metadata", - "min_workers", - "max_workers", - "per_worker", - "labels" - ], - "title": "CreateSGLangModelEndpointRequest" - }, - "CreateTensorRTLLMModelEndpointRequest": { - "properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": 
"number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name" - }, - "model_name": { - "type": "string", - "title": "Model Name" - }, - "metadata": { - "additionalProperties": true, - "type": "object", - "title": "Metadata" - }, - "min_workers": { - "type": "integer", - "title": "Min Workers" - }, - "max_workers": { - "type": "integer", - "title": "Max Workers" - }, - "per_worker": { - "type": "integer", - "title": "Per Worker" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels" - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "default": "hugging_face" - }, - "inference_framework_image_tag": { - "type": "string", - "title": "Inference Framework Image Tag", - "default": "latest" - }, - "num_shards": { - "type": "integer", - "title": "Num Shards", - "default": 1 - }, - "endpoint_type": { - "$ref": "#/components/schemas/ModelEndpointType", - "default": "sync" - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "tensorrt_llm", - "enum": [ - "tensorrt_llm" - ] - } - }, - "type": "object", - "required": [ - "name", - "model_name", - "metadata", - "min_workers", - "max_workers", - "per_worker", - "labels" - ], - "title": "CreateTensorRTLLMModelEndpointRequest" - }, - "CreateTextGenerationInferenceModelEndpointRequest": { - "properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - 
}, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name" - }, - "model_name": { - "type": "string", - "title": "Model Name" - }, - "metadata": { - "additionalProperties": true, - "type": "object", - "title": "Metadata" - }, - "min_workers": { - "type": "integer", - "title": "Min Workers" - }, - "max_workers": { - "type": "integer", - "title": "Max Workers" - }, - "per_worker": { - "type": "integer", - "title": "Per Worker" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels" - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "default": "hugging_face" - }, - "inference_framework_image_tag": { - "type": "string", - "title": "Inference Framework Image Tag", - "default": "latest" - }, - "num_shards": { - "type": "integer", - "title": "Num Shards", - "default": 1 - }, - "endpoint_type": { - "$ref": "#/components/schemas/ModelEndpointType", - "default": "sync" - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "text_generation_inference", - "enum": [ - "text_generation_inference" - ] - } - }, - "type": "object", - "required": [ - "name", - "model_name", - "metadata", - "min_workers", - "max_workers", - "per_worker", - "labels" - ], - "title": "CreateTextGenerationInferenceModelEndpointRequest" - }, - "CreateTriggerV1Request": { - "properties": { - "name": { - "type": "string", - "title": "Name" - }, - "cron_schedule": { - "type": "string", - "title": "Cron Schedule" - }, - "bundle_id": { - "type": "string", - "title": "Bundle Id" - }, - "default_job_config": { - "title": "Default Job Config", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_job_metadata": { - "title": "Default Job Metadata", - "additionalProperties": { - "type": "string" - }, - "type": "object", - 
"nullable": true - } - }, - "type": "object", - "required": [ - "name", - "cron_schedule", - "bundle_id" - ], - "title": "CreateTriggerV1Request" - }, - "CreateTriggerV1Response": { - "properties": { - "trigger_id": { - "type": "string", - "title": "Trigger Id" - } - }, - "type": "object", - "required": [ - "trigger_id" - ], - "title": "CreateTriggerV1Response" - }, - "CreateVLLMModelEndpointRequest": { - "properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - 
"default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name" - }, - "model_name": { - "type": "string", - "title": "Model Name" - }, - "metadata": { - "additionalProperties": true, - "type": "object", - "title": "Metadata" - }, - "min_workers": { - "type": "integer", - "title": "Min Workers" - }, - "max_workers": { - "type": "integer", - "title": "Max Workers" - }, - "per_worker": { - "type": "integer", - "title": "Per Worker" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Labels" - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "default": "hugging_face" - }, - "inference_framework_image_tag": { - "type": "string", - "title": "Inference Framework Image Tag", - "default": "latest" - }, - "num_shards": { - "type": "integer", - "title": "Num Shards", - "default": 1 - }, - "endpoint_type": { - "$ref": "#/components/schemas/ModelEndpointType", - "default": "sync" - }, - "max_gpu_memory_utilization": { - "title": "Max Gpu Memory Utilization", - "description": "Maximum GPU memory utilization for the batch inference. Default to 90%. 
Deprecated in favor of specifying this in VLLMModelConfig", - "type": "number", - "nullable": true - }, - "attention_backend": { - "title": "Attention Backend", - "description": "Attention backend to use for vLLM. Default to None.", - "type": "string", - "nullable": true - }, - "max_model_len": { - "title": "Max Model Len", - "description": "Model context length, If unspecified, will be automatically derived from the model config", - "type": "integer", - "nullable": true - }, - "max_num_seqs": { - "title": "Max Num Seqs", - "description": "Maximum number of sequences per iteration", - "type": "integer", - "nullable": true - }, - "enforce_eager": { - "title": "Enforce Eager", - "description": "Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal perforamnce and flexibility", - "type": "boolean", - "nullable": true - }, - "trust_remote_code": { - "title": "Trust Remote Code", - "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False.", - "default": false, - "type": "boolean", - "nullable": true - }, - "pipeline_parallel_size": { - "title": "Pipeline Parallel Size", - "description": "Number of pipeline stages. Default to None.", - "type": "integer", - "nullable": true - }, - "tensor_parallel_size": { - "title": "Tensor Parallel Size", - "description": "Number of tensor parallel replicas. Default to None.", - "type": "integer", - "nullable": true - }, - "quantization": { - "title": "Quantization", - "description": "Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. 
If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.", - "type": "string", - "nullable": true - }, - "disable_log_requests": { - "title": "Disable Log Requests", - "description": "Disable logging requests. Default to None.", - "type": "boolean", - "nullable": true - }, - "chat_template": { - "title": "Chat Template", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "tool_call_parser": { - "title": "Tool Call Parser", - "description": "Tool call parser", - "type": "string", - "nullable": true - }, - "enable_auto_tool_choice": { - "title": "Enable Auto Tool Choice", - "description": "Enable auto tool choice", - "type": "boolean", - "nullable": true - }, - "load_format": { - "title": "Load Format", - "description": "The format of the model weights to load.\n\n* \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.\n* \"pt\" will load the weights in the pytorch bin format.\n* \"safetensors\" will load the weights in the safetensors format.\n* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading.\n* \"dummy\" will initialize the weights with random values, which is mainly for profiling.\n* \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information.\n* \"bitsandbytes\" will load the weights using bitsandbytes quantization.\n", - "type": "string", - "nullable": true - }, - "config_format": { - "title": "Config Format", - "description": "The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'.", - "type": "string", - "nullable": true - }, - "tokenizer_mode": { - "title": "Tokenizer Mode", - "description": "Tokenizer mode. 
'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`.", - "type": "string", - "nullable": true - }, - "limit_mm_per_prompt": { - "title": "Limit Mm Per Prompt", - "description": "Maximum number of data instances per modality per prompt. Only applicable for multimodal models.", - "type": "string", - "nullable": true - }, - "max_num_batched_tokens": { - "title": "Max Num Batched Tokens", - "description": "Maximum number of batched tokens per iteration", - "type": "integer", - "nullable": true - }, - "tokenizer": { - "title": "Tokenizer", - "description": "Name or path of the huggingface tokenizer to use.", - "type": "string", - "nullable": true - }, - "dtype": { - "title": "Dtype", - "description": "Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.", - "type": "string", - "nullable": true - }, - "seed": { - "title": "Seed", - "description": "Random seed for reproducibility.", - "type": "integer", - "nullable": true - }, - "revision": { - "title": "Revision", - "description": "The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "code_revision": { - "title": "Code Revision", - "description": "The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "rope_scaling": { - "title": "Rope Scaling", - "description": "Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "tokenizer_revision": { - "title": "Tokenizer Revision", - "description": "The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "quantization_param_path": { - "title": "Quantization Param Path", - "description": "Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm.", - "type": "string", - "nullable": true - }, - "max_seq_len_to_capture": { - "title": "Max Seq Len To Capture", - "description": "Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode.", - "type": "integer", - "nullable": true - }, - "disable_sliding_window": { - "title": "Disable Sliding Window", - "description": "Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored.", - "type": "boolean", - "nullable": true - }, - "skip_tokenizer_init": { - "title": "Skip Tokenizer Init", - "description": "If true, skip initialization of tokenizer and detokenizer.", - "type": "boolean", - "nullable": true - }, - "served_model_name": { - "title": "Served Model Name", - "description": "The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. 
If not specified, the model name will be the same as `model`.", - "type": "string", - "nullable": true - }, - "override_neuron_config": { - "title": "Override Neuron Config", - "description": "Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "mm_processor_kwargs": { - "title": "Mm Processor Kwargs", - "description": "Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "block_size": { - "title": "Block Size", - "description": "Size of a cache block in number of tokens.", - "type": "integer", - "nullable": true - }, - "gpu_memory_utilization": { - "title": "Gpu Memory Utilization", - "description": "Fraction of GPU memory to use for the vLLM execution.", - "type": "number", - "nullable": true - }, - "swap_space": { - "title": "Swap Space", - "description": "Size of the CPU swap space per GPU (in GiB).", - "type": "number", - "nullable": true - }, - "cache_dtype": { - "title": "Cache Dtype", - "description": "Data type for kv cache storage.", - "type": "string", - "nullable": true - }, - "num_gpu_blocks_override": { - "title": "Num Gpu Blocks Override", - "description": "Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. 
Does nothing if None.", - "type": "integer", - "nullable": true - }, - "enable_prefix_caching": { - "title": "Enable Prefix Caching", - "description": "Enables automatic prefix caching.", - "type": "boolean", - "nullable": true - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "vllm", - "enum": [ - "vllm" - ] - } - }, - "type": "object", - "required": [ - "name", - "model_name", - "metadata", - "min_workers", - "max_workers", - "per_worker", - "labels" - ], - "title": "CreateVLLMModelEndpointRequest" - }, - "CustomFramework": { - "properties": { - "framework_type": { - "type": "string", - "title": "Framework Type", - "enum": [ - "custom_base_image" - ] - }, - "image_repository": { - "type": "string", - "title": "Image Repository" - }, - "image_tag": { - "type": "string", - "title": "Image Tag" - } - }, - "type": "object", - "required": [ - "framework_type", - "image_repository", - "image_tag" - ], - "title": "CustomFramework", - "description": "This is the entity-layer class for a custom framework specification." - }, - "DeleteFileResponse": { - "properties": { - "deleted": { - "type": "boolean", - "title": "Deleted", - "description": "Whether deletion was successful." - } - }, - "type": "object", - "required": [ - "deleted" - ], - "title": "DeleteFileResponse", - "description": "Response object for deleting a file." 
- }, - "DeleteLLMEndpointResponse": { - "properties": { - "deleted": { - "type": "boolean", - "title": "Deleted" - } - }, - "type": "object", - "required": [ - "deleted" - ], - "title": "DeleteLLMEndpointResponse" - }, - "DeleteModelEndpointV1Response": { - "properties": { - "deleted": { - "type": "boolean", - "title": "Deleted" - } - }, - "type": "object", - "required": [ - "deleted" - ], - "title": "DeleteModelEndpointV1Response" - }, - "DeleteTriggerV1Response": { - "properties": { - "success": { - "type": "boolean", - "title": "Success" - } - }, - "type": "object", - "required": [ - "success" - ], - "title": "DeleteTriggerV1Response" - }, - "DockerImageBatchJob": { - "properties": { - "id": { - "type": "string", - "title": "Id" - }, - "created_by": { - "type": "string", - "title": "Created By" - }, - "owner": { - "type": "string", - "title": "Owner" - }, - "created_at": { - "type": "string", - "format": "date-time", - "title": "Created At" - }, - "completed_at": { - "title": "Completed At", - "type": "string", - "format": "date-time", - "nullable": true - }, - "status": { - "$ref": "#/components/schemas/BatchJobStatus" - }, - "annotations": { - "title": "Annotations", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "override_job_max_runtime_s": { - "title": "Override Job Max Runtime S", - "type": "integer", - "nullable": true - }, - "num_workers": { - "title": "Num Workers", - "default": 1, - "type": "integer", - "nullable": true - } - }, - "type": "object", - "required": [ - "id", - "created_by", - "owner", - "created_at", - "status" - ], - "title": "DockerImageBatchJob", - "description": "This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job\ncreated via the \"supply a docker image for a k8s job\" API." 
- }, - "DockerImageBatchJobBundleV1Response": { - "properties": { - "id": { - "type": "string", - "title": "Id" - }, - "name": { - "type": "string", - "title": "Name" - }, - "created_at": { - "type": "string", - "format": "date-time", - "title": "Created At" - }, - "image_repository": { - "type": "string", - "title": "Image Repository" - }, - "image_tag": { - "type": "string", - "title": "Image Tag" - }, - "command": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Command" - }, - "env": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Env" - }, - "mount_location": { - "title": "Mount Location", - "type": "string", - "nullable": true - }, - "cpus": { - "title": "Cpus", - "type": "string", - "nullable": true - }, - "memory": { - "title": "Memory", - "type": "string", - "nullable": true - }, - "storage": { - "title": "Storage", - "type": "string", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "gpu_type": { - "title": "Gpu Type", - "type": "string", - "nullable": true - }, - "public": { - "title": "Public", - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "id", - "name", - "created_at", - "image_repository", - "image_tag", - "command", - "env" - ], - "title": "DockerImageBatchJobBundleV1Response" - }, - "EndpointPredictV1Request": { - "properties": { - "url": { - "title": "Url", - "type": "string", - "nullable": true - }, - "args": { - "$ref": "#/components/schemas/RequestSchema", - "nullable": true - }, - "cloudpickle": { - "title": "Cloudpickle", - "type": "string", - "nullable": true - }, - "callback_url": { - "title": "Callback Url", - "type": "string", - "nullable": true - }, - "callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "return_pickled": { - "type": "boolean", - "title": "Return Pickled", - "default": false - }, - "destination_path": { - "title": "Destination Path", 
- "type": "string", - "nullable": true - } - }, - "type": "object", - "title": "EndpointPredictV1Request" - }, - "File": { - "properties": { - "filename": { - "title": "Filename", - "description": "The name of the file, used when passing the file to the model as a \nstring.\n", - "type": "string", - "nullable": true - }, - "file_data": { - "title": "File Data", - "description": "The base64 encoded file data, used when passing the file to the model \nas a string.\n", - "type": "string", - "nullable": true - }, - "file_id": { - "title": "File Id", - "description": "The ID of an uploaded file to use as input.\n", - "type": "string", - "nullable": true - } - }, - "type": "object", - "title": "File" - }, - "FilteredChatCompletionV2Request": { - "properties": { - "best_of": { - "title": "Best Of", - "description": "Number of output sequences that are generated from the prompt.\n From these `best_of` sequences, the top `n` sequences are returned.\n `best_of` must be greater than or equal to `n`. This is treated as\n the beam width when `use_beam_search` is True. By default, `best_of`\n is set to `n`.", - "type": "integer", - "nullable": true - }, - "top_k": { - "title": "Top K", - "description": "Controls the number of top tokens to consider. -1 means consider all tokens.", - "type": "integer", - "minimum": -1.0, - "nullable": true - }, - "min_p": { - "title": "Min P", - "description": "Float that represents the minimum probability for a token to be\n considered, relative to the probability of the most likely token.\n Must be in [0, 1]. 
Set to 0 to disable this.", - "type": "number", - "nullable": true - }, - "use_beam_search": { - "title": "Use Beam Search", - "description": "Whether to use beam search for sampling.", - "type": "boolean", - "nullable": true - }, - "length_penalty": { - "title": "Length Penalty", - "description": "Float that penalizes sequences based on their length.\n Used in beam search.", - "type": "number", - "nullable": true - }, - "repetition_penalty": { - "title": "Repetition Penalty", - "description": "Float that penalizes new tokens based on whether\n they appear in the prompt and the generated text so far. Values > 1\n encourage the model to use new tokens, while values < 1 encourage\n the model to repeat tokens.", - "type": "number", - "nullable": true - }, - "early_stopping": { - "title": "Early Stopping", - "description": "Controls the stopping condition for beam search. It\n accepts the following values: `True`, where the generation stops as\n soon as there are `best_of` complete candidates; `False`, where an\n heuristic is applied and the generation stops when is it very\n unlikely to find better candidates; `\"never\"`, where the beam search\n procedure only stops when there cannot be better candidates\n (canonical beam search algorithm).", - "type": "boolean", - "nullable": true - }, - "stop_token_ids": { - "title": "Stop Token Ids", - "description": "List of tokens that stop the generation when they are\n generated. The returned output will contain the stop tokens unless\n the stop tokens are special tokens.", - "items": { - "type": "integer" - }, - "type": "array", - "nullable": true - }, - "include_stop_str_in_output": { - "title": "Include Stop Str In Output", - "description": "Whether to include the stop strings in\n output text. 
Defaults to False.", - "type": "boolean", - "nullable": true - }, - "ignore_eos": { - "title": "Ignore Eos", - "description": "Whether to ignore the EOS token and continue generating\n tokens after the EOS token is generated.", - "type": "boolean", - "nullable": true - }, - "min_tokens": { - "title": "Min Tokens", - "description": "Minimum number of tokens to generate per output sequence\n before EOS or stop_token_ids can be generated", - "type": "integer", - "nullable": true - }, - "skip_special_tokens": { - "title": "Skip Special Tokens", - "description": "Whether to skip special tokens in the output. Only supported in vllm.", - "default": true, - "type": "boolean", - "nullable": true - }, - "spaces_between_special_tokens": { - "title": "Spaces Between Special Tokens", - "description": "Whether to add spaces between special tokens in the output. Only supported in vllm.", - "default": true, - "type": "boolean", - "nullable": true - }, - "echo": { - "title": "Echo", - "description": "If true, the new message will be prepended with the last message if they belong to the same role.", - "type": "boolean", - "nullable": true - }, - "add_generation_prompt": { - "title": "Add Generation Prompt", - "description": "If true, the generation prompt will be added to the chat template. This is a parameter used by chat template in tokenizer config of the model.", - "type": "boolean", - "nullable": true - }, - "continue_final_message": { - "title": "Continue Final Message", - "description": "If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. The model will continue this message rather than starting a new one. This allows you to \"prefill\" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`.", - "type": "boolean", - "nullable": true - }, - "add_special_tokens": { - "title": "Add Special Tokens", - "description": "If true, special tokens (e.g. 
BOS) will be added to the prompt on top of what is added by the chat template. For most models, the chat template takes care of adding the special tokens so this should be set to false (as is the default).", - "type": "boolean", - "nullable": true - }, - "documents": { - "title": "Documents", - "description": "A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing \"title\" and \"text\" keys.", - "items": { - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - "type": "array", - "nullable": true - }, - "chat_template": { - "title": "Chat Template", - "description": "A Jinja template to use for this conversion. As of transformers v4.44, default chat template is no longer allowed, so you must provide a chat template if the model's tokenizer does not define one and no override template is given", - "type": "string", - "nullable": true - }, - "chat_template_kwargs": { - "title": "Chat Template Kwargs", - "description": "Additional kwargs to pass to the template renderer. Will be accessible by the chat template.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "guided_json": { - "title": "Guided Json", - "description": "JSON schema for guided decoding. Only supported in vllm.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "guided_regex": { - "title": "Guided Regex", - "description": "Regex for guided decoding. Only supported in vllm.", - "type": "string", - "nullable": true - }, - "guided_choice": { - "title": "Guided Choice", - "description": "Choices for guided decoding. 
Only supported in vllm.", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "guided_grammar": { - "title": "Guided Grammar", - "description": "Context-free grammar for guided decoding. Only supported in vllm.", - "type": "string", - "nullable": true - }, - "guided_decoding_backend": { - "title": "Guided Decoding Backend", - "description": "If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer'", - "type": "string", - "nullable": true - }, - "guided_whitespace_pattern": { - "title": "Guided Whitespace Pattern", - "description": "If specified, will override the default whitespace pattern for guided json decoding.", - "type": "string", - "nullable": true - }, - "priority": { - "title": "Priority", - "description": "The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.", - "type": "integer", - "nullable": true - }, - "metadata": { - "$ref": "#/components/schemas/Metadata", - "nullable": true - }, - "temperature": { - "title": "Temperature", - "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.\n", - "default": 1, - "type": "number", - "maximum": 2.0, - "minimum": 0.0, - "nullable": true, - "example": 1 - }, - "top_p": { - "title": "Top P", - "description": "An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. 
So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both.\n", - "default": 1, - "type": "number", - "maximum": 1.0, - "minimum": 0.0, - "nullable": true, - "example": 1 - }, - "user": { - "title": "User", - "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n", - "type": "string", - "nullable": true, - "example": "user-1234" - }, - "service_tier": { - "$ref": "#/components/schemas/ServiceTier", - "nullable": true - }, - "messages": { - "items": { - "$ref": "#/components/schemas/ChatCompletionRequestMessage" - }, - "type": "array", - "minItems": 1, - "title": "Messages", - "description": "A list of messages comprising the conversation so far. Depending on the\n[model](/docs/models) you use, different message types (modalities) are\nsupported, like [text](/docs/guides/text-generation),\n[images](/docs/guides/vision), and [audio](/docs/guides/audio).\n" - }, - "model": { - "title": "Model", - "type": "string", - "nullable": true - }, - "modalities": { - "$ref": "#/components/schemas/ResponseModalities", - "nullable": true - }, - "reasoning_effort": { - "$ref": "#/components/schemas/ReasoningEffort", - "nullable": true - }, - "max_completion_tokens": { - "title": "Max Completion Tokens", - "description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning).\n", - "type": "integer", - "nullable": true - }, - "frequency_penalty": { - "title": "Frequency Penalty", - "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on\ntheir existing frequency in the text so far, decreasing the model's\nlikelihood to repeat the same line verbatim.\n", - "default": 0, - "type": "number", - "maximum": 2.0, - "minimum": -2.0, - "nullable": true - }, - "presence_penalty": { - "title": "Presence Penalty", - "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on\nwhether they appear in the text so far, increasing the model's likelihood\nto talk about new topics.\n", - "default": 0, - "type": "number", - "maximum": 2.0, - "minimum": -2.0, - "nullable": true - }, - "web_search_options": { - "title": "Web search", - "description": "This tool searches the web for relevant results to use in a response.\nLearn more about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).\n", - "$ref": "#/components/schemas/WebSearchOptions", - "nullable": true - }, - "top_logprobs": { - "title": "Top Logprobs", - "description": "An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.\n", - "type": "integer", - "maximum": 20.0, - "minimum": 0.0, - "nullable": true - }, - "response_format": { - "anyOf": [ - { - "$ref": "#/components/schemas/ResponseFormatText" - }, - { - "$ref": "#/components/schemas/ResponseFormatJsonSchema" - }, - { - "$ref": "#/components/schemas/ResponseFormatJsonObject" - } - ], - "title": "Response Format", - "description": "An object specifying the format that the model must output.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema. Learn more in the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. 
Using `json_schema`\nis preferred for models that support it.\n", - "nullable": true - }, - "audio": { - "description": "Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`. [Learn more](/docs/guides/audio).\n", - "$ref": "#/components/schemas/Audio2", - "nullable": true - }, - "store": { - "title": "Store", - "description": "Whether or not to store the output of this chat completion request for \nuse in our [model distillation](/docs/guides/distillation) or\n[evals](/docs/guides/evals) products.\n", - "default": false, - "type": "boolean", - "nullable": true - }, - "stream": { - "title": "Stream", - "default": false, - "type": "boolean", - "nullable": true - }, - "stop": { - "$ref": "#/components/schemas/StopConfiguration", - "nullable": true - }, - "logit_bias": { - "title": "Logit Bias", - "description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the\ntokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling.\nThe exact effect will vary per model, but values between -1 and 1 should\ndecrease or increase likelihood of selection; values like -100 or 100\nshould result in a ban or exclusive selection of the relevant token.\n", - "additionalProperties": { - "type": "integer" - }, - "type": "object", - "nullable": true - }, - "logprobs": { - "title": "Logprobs", - "description": "Whether to return log probabilities of the output tokens or not. If true,\nreturns the log probabilities of each output token returned in the\n`content` of `message`.\n", - "default": false, - "type": "boolean", - "nullable": true - }, - "max_tokens": { - "title": "Max Tokens", - "description": "The maximum number of [tokens](/tokenizer) that can be generated in the\nchat completion. 
This value can be used to control\n[costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with [o-series models](/docs/guides/reasoning).\n", - "type": "integer", - "nullable": true - }, - "n": { - "title": "N", - "description": "How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.", - "default": 1, - "type": "integer", - "maximum": 128.0, - "minimum": 1.0, - "nullable": true, - "example": 1 - }, - "prediction": { - "description": "Configuration for a [Predicted Output](/docs/guides/predicted-outputs),\nwhich can greatly improve response times when large parts of the model\nresponse are known ahead of time. This is most common when you are\nregenerating a file with only minor changes to most of the content.\n", - "$ref": "#/components/schemas/PredictionContent", - "nullable": true - }, - "seed": { - "title": "Seed", - "description": "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n", - "type": "integer", - "maximum": 9.223372036854776e+18, - "minimum": -9.223372036854776e+18, - "nullable": true - }, - "stream_options": { - "$ref": "#/components/schemas/ChatCompletionStreamOptions", - "nullable": true - }, - "tools": { - "title": "Tools", - "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. 
A max of 128 functions are supported.\n", - "items": { - "$ref": "#/components/schemas/ChatCompletionTool" - }, - "type": "array", - "nullable": true - }, - "tool_choice": { - "$ref": "#/components/schemas/ChatCompletionToolChoiceOption", - "nullable": true - }, - "parallel_tool_calls": { - "$ref": "#/components/schemas/ParallelToolCalls", - "nullable": true - }, - "function_call": { - "anyOf": [ - { - "type": "string", - "enum": [ - "none", - "auto" - ] - }, - { - "$ref": "#/components/schemas/ChatCompletionFunctionCallOption" - } - ], - "title": "Function Call", - "description": "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n\n`none` means the model will not call a function and instead generates a\nmessage.\n\n`auto` means the model can pick between generating a message or calling a\nfunction.\n\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the\nmodel to call that function.\n\n`none` is the default when no functions are present. `auto` is the default\nif functions are present.\n", - "nullable": true - }, - "functions": { - "title": "Functions", - "description": "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n", - "items": { - "$ref": "#/components/schemas/ChatCompletionFunctions" - }, - "type": "array", - "maxItems": 128, - "minItems": 1, - "nullable": true - } - }, - "type": "object", - "required": [ - "messages" - ], - "title": "FilteredChatCompletionV2Request" - }, - "FilteredCompletionV2Request": { - "properties": { - "best_of": { - "title": "Best Of", - "description": "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). 
Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return \u2013 `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n", - "default": 1, - "type": "integer", - "maximum": 20.0, - "minimum": 0.0, - "nullable": true - }, - "top_k": { - "title": "Top K", - "description": "Controls the number of top tokens to consider. -1 means consider all tokens.", - "type": "integer", - "minimum": -1.0, - "nullable": true - }, - "min_p": { - "title": "Min P", - "description": "Float that represents the minimum probability for a token to be\n considered, relative to the probability of the most likely token.\n Must be in [0, 1]. Set to 0 to disable this.", - "type": "number", - "nullable": true - }, - "use_beam_search": { - "title": "Use Beam Search", - "description": "Whether to use beam search for sampling.", - "type": "boolean", - "nullable": true - }, - "length_penalty": { - "title": "Length Penalty", - "description": "Float that penalizes sequences based on their length.\n Used in beam search.", - "type": "number", - "nullable": true - }, - "repetition_penalty": { - "title": "Repetition Penalty", - "description": "Float that penalizes new tokens based on whether\n they appear in the prompt and the generated text so far. Values > 1\n encourage the model to use new tokens, while values < 1 encourage\n the model to repeat tokens.", - "type": "number", - "nullable": true - }, - "early_stopping": { - "title": "Early Stopping", - "description": "Controls the stopping condition for beam search. 
It\n accepts the following values: `True`, where the generation stops as\n soon as there are `best_of` complete candidates; `False`, where an\n heuristic is applied and the generation stops when is it very\n unlikely to find better candidates; `\"never\"`, where the beam search\n procedure only stops when there cannot be better candidates\n (canonical beam search algorithm).", - "type": "boolean", - "nullable": true - }, - "stop_token_ids": { - "title": "Stop Token Ids", - "description": "List of tokens that stop the generation when they are\n generated. The returned output will contain the stop tokens unless\n the stop tokens are special tokens.", - "items": { - "type": "integer" - }, - "type": "array", - "nullable": true - }, - "include_stop_str_in_output": { - "title": "Include Stop Str In Output", - "description": "Whether to include the stop strings in output text.", - "type": "boolean", - "nullable": true - }, - "ignore_eos": { - "title": "Ignore Eos", - "description": "Whether to ignore the EOS token and continue generating\n tokens after the EOS token is generated.", - "type": "boolean", - "nullable": true - }, - "min_tokens": { - "title": "Min Tokens", - "description": "Minimum number of tokens to generate per output sequence\n before EOS or stop_token_ids can be generated", - "type": "integer", - "nullable": true - }, - "skip_special_tokens": { - "title": "Skip Special Tokens", - "description": "Whether to skip special tokens in the output. Only supported in vllm.", - "default": true, - "type": "boolean", - "nullable": true - }, - "spaces_between_special_tokens": { - "title": "Spaces Between Special Tokens", - "description": "Whether to add spaces between special tokens in the output. Only supported in vllm.", - "default": true, - "type": "boolean", - "nullable": true - }, - "add_special_tokens": { - "title": "Add Special Tokens", - "description": "If true (the default), special tokens (e.g. 
BOS) will be added to the prompt.", - "type": "boolean", - "nullable": true - }, - "response_format": { - "anyOf": [ - { - "$ref": "#/components/schemas/ResponseFormatText" - }, - { - "$ref": "#/components/schemas/ResponseFormatJsonSchema" - }, - { - "$ref": "#/components/schemas/ResponseFormatJsonObject" - } - ], - "title": "Response Format", - "description": "Similar to chat completion, this parameter specifies the format of output. Only {'type': 'json_object'} or {'type': 'text' } is supported.", - "nullable": true - }, - "guided_json": { - "title": "Guided Json", - "description": "JSON schema for guided decoding. Only supported in vllm.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "guided_regex": { - "title": "Guided Regex", - "description": "Regex for guided decoding. Only supported in vllm.", - "type": "string", - "nullable": true - }, - "guided_choice": { - "title": "Guided Choice", - "description": "Choices for guided decoding. Only supported in vllm.", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "guided_grammar": { - "title": "Guided Grammar", - "description": "Context-free grammar for guided decoding. Only supported in vllm.", - "type": "string", - "nullable": true - }, - "guided_decoding_backend": { - "title": "Guided Decoding Backend", - "description": "If specified, will override the default guided decoding backend of the server for this specific request. 
If set, must be either 'outlines' / 'lm-format-enforcer'", - "type": "string", - "nullable": true - }, - "guided_whitespace_pattern": { - "title": "Guided Whitespace Pattern", - "description": "If specified, will override the default whitespace pattern for guided json decoding.", - "type": "string", - "nullable": true - }, - "model": { - "title": "Model", - "type": "string", - "nullable": true - }, - "prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "$ref": "#/components/schemas/Prompt" - }, - { - "$ref": "#/components/schemas/Prompt1" - } - ], - "title": "Prompt", - "description": "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n", - "nullable": true - }, - "echo": { - "title": "Echo", - "description": "Echo back the prompt in addition to the completion\n", - "default": false, - "type": "boolean", - "nullable": true - }, - "frequency_penalty": { - "title": "Frequency Penalty", - "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n", - "default": 0, - "type": "number", - "maximum": 2.0, - "minimum": -2.0, - "nullable": true - }, - "logit_bias": { - "title": "Logit Bias", - "description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. 
Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n", - "additionalProperties": { - "type": "integer" - }, - "type": "object", - "nullable": true - }, - "logprobs": { - "title": "Logprobs", - "description": "Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n", - "type": "integer", - "maximum": 5.0, - "minimum": 0.0, - "nullable": true - }, - "max_tokens": { - "title": "Max Tokens", - "description": "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n", - "default": 16, - "type": "integer", - "minimum": 0.0, - "nullable": true, - "example": 16 - }, - "n": { - "title": "N", - "description": "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n", - "default": 1, - "type": "integer", - "maximum": 128.0, - "minimum": 1.0, - "nullable": true, - "example": 1 - }, - "presence_penalty": { - "title": "Presence Penalty", - "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n", - "default": 0, - "type": "number", - "maximum": 2.0, - "minimum": -2.0, - "nullable": true - }, - "seed": { - "title": "Seed", - "description": "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n", - "type": "integer", - "nullable": true - }, - "stop": { - "$ref": "#/components/schemas/StopConfiguration", - "nullable": true - }, - "stream": { - "title": "Stream", - "default": false, - "type": "boolean", - "nullable": true - }, - "stream_options": { - "$ref": "#/components/schemas/ChatCompletionStreamOptions", - "nullable": true - }, - "suffix": { - "title": "Suffix", - "description": "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n", - "type": "string", - "nullable": true, - "example": "test." - }, - "temperature": { - "title": "Temperature", - "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n", - "default": 1, - "type": "number", - "maximum": 2.0, - "minimum": 0.0, - "nullable": true, - "example": 1 - }, - "top_p": { - "title": "Top P", - "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n", - "default": 1, - "type": "number", - "maximum": 1.0, - "minimum": 0.0, - "nullable": true, - "example": 1 - }, - "user": { - "title": "User", - "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n", - "type": "string", - "nullable": true, - "example": "user-1234" - } - }, - "type": "object", - "required": [ - "prompt" - ], - "title": "FilteredCompletionV2Request" - }, - "Function1": { - "properties": { - "name": { - "type": "string", - "title": "Name", - "description": "The name of the function to call." - }, - "arguments": { - "type": "string", - "title": "Arguments", - "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." - } - }, - "type": "object", - "required": [ - "name", - "arguments" - ], - "title": "Function1" - }, - "Function2": { - "properties": { - "name": { - "title": "Name", - "description": "The name of the function to call.", - "type": "string", - "nullable": true - }, - "arguments": { - "title": "Arguments", - "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.", - "type": "string", - "nullable": true - } - }, - "type": "object", - "title": "Function2" - }, - "Function3": { - "properties": { - "name": { - "type": "string", - "title": "Name", - "description": "The name of the function to call." 
- } - }, - "type": "object", - "required": [ - "name" - ], - "title": "Function3" - }, - "FunctionCall": { - "properties": { - "arguments": { - "type": "string", - "title": "Arguments", - "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." - }, - "name": { - "type": "string", - "title": "Name", - "description": "The name of the function to call." - } - }, - "type": "object", - "required": [ - "arguments", - "name" - ], - "title": "FunctionCall" - }, - "FunctionCall2": { - "properties": { - "arguments": { - "title": "Arguments", - "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.", - "type": "string", - "nullable": true - }, - "name": { - "title": "Name", - "description": "The name of the function to call.", - "type": "string", - "nullable": true - } - }, - "type": "object", - "title": "FunctionCall2" - }, - "FunctionObject": { - "properties": { - "description": { - "title": "Description", - "description": "A description of what the function does, used by the model to choose when and how to call the function.", - "type": "string", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name", - "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." - }, - "parameters": { - "$ref": "#/components/schemas/FunctionParameters", - "nullable": true - }, - "strict": { - "title": "Strict", - "description": "Whether to enable strict schema adherence when generating the function call. 
If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling).", - "default": false, - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "name" - ], - "title": "FunctionObject" - }, - "FunctionParameters": { - "properties": {}, - "additionalProperties": true, - "type": "object", - "title": "FunctionParameters" - }, - "GetAsyncTaskV1Response": { - "properties": { - "task_id": { - "type": "string", - "title": "Task Id" - }, - "status": { - "$ref": "#/components/schemas/TaskStatus" - }, - "result": { - "$ref": "#/components/schemas/ResponseSchema", - "nullable": true - }, - "traceback": { - "title": "Traceback", - "type": "string", - "nullable": true - }, - "status_code": { - "title": "Status Code", - "type": "integer", - "nullable": true - } - }, - "type": "object", - "required": [ - "task_id", - "status" - ], - "title": "GetAsyncTaskV1Response" - }, - "GetBatchCompletionV2Response": { - "properties": { - "job": { - "$ref": "#/components/schemas/BatchCompletionsJob" - } - }, - "type": "object", - "required": [ - "job" - ], - "title": "GetBatchCompletionV2Response" - }, - "GetBatchJobV1Response": { - "properties": { - "status": { - "$ref": "#/components/schemas/BatchJobStatus" - }, - "result": { - "title": "Result", - "type": "string", - "nullable": true - }, - "duration": { - "type": "string", - "format": "duration", - "title": "Duration" - }, - "num_tasks_pending": { - "title": "Num Tasks Pending", - "type": "integer", - "nullable": true - }, - "num_tasks_completed": { - "title": "Num Tasks Completed", - "type": "integer", - "nullable": true - } - }, - "type": "object", - "required": [ - "status", - "duration" - ], - "title": "GetBatchJobV1Response" - }, - "GetDockerImageBatchJobV1Response": { - "properties": { - "status": { - "$ref": 
"#/components/schemas/BatchJobStatus" - } - }, - "type": "object", - "required": [ - "status" - ], - "title": "GetDockerImageBatchJobV1Response" - }, - "GetFileContentResponse": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "ID of the requested file." - }, - "content": { - "type": "string", - "title": "Content", - "description": "File content." - } - }, - "type": "object", - "required": [ - "id", - "content" - ], - "title": "GetFileContentResponse", - "description": "Response object for retrieving a file's content." - }, - "GetFileResponse": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "ID of the requested file." - }, - "filename": { - "type": "string", - "title": "Filename", - "description": "File name." - }, - "size": { - "type": "integer", - "title": "Size", - "description": "Length of the file, in characters." - } - }, - "type": "object", - "required": [ - "id", - "filename", - "size" - ], - "title": "GetFileResponse", - "description": "Response object for retrieving a file." - }, - "GetFineTuneEventsResponse": { - "properties": { - "events": { - "items": { - "$ref": "#/components/schemas/LLMFineTuneEvent" - }, - "type": "array", - "title": "Events" - } - }, - "type": "object", - "required": [ - "events" - ], - "title": "GetFineTuneEventsResponse" - }, - "GetFineTuneResponse": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "Unique ID of the fine tune" - }, - "fine_tuned_model": { - "title": "Fine Tuned Model", - "description": "Name of the resulting fine-tuned model. This can be plugged into the Completion API ones the fine-tune is complete", - "type": "string", - "nullable": true - }, - "status": { - "$ref": "#/components/schemas/BatchJobStatus", - "description": "Status of the requested fine tune." 
- } - }, - "type": "object", - "required": [ - "id", - "status" - ], - "title": "GetFineTuneResponse" - }, - "GetLLMModelEndpointV1Response": { - "properties": { - "id": { - "type": "string", - "title": "Id" - }, - "name": { - "type": "string", - "title": "Name" - }, - "model_name": { - "type": "string", - "title": "Model Name" - }, - "source": { - "$ref": "#/components/schemas/LLMSource" - }, - "status": { - "$ref": "#/components/schemas/ModelEndpointStatus" - }, - "inference_framework": { - "$ref": "#/components/schemas/LLMInferenceFramework" - }, - "inference_framework_image_tag": { - "title": "Inference Framework Image Tag", - "type": "string", - "nullable": true - }, - "num_shards": { - "title": "Num Shards", - "type": "integer", - "nullable": true - }, - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "spec": { - "$ref": "#/components/schemas/GetModelEndpointV1Response", - "nullable": true - } - }, - "type": "object", - "required": [ - "id", - "name", - "model_name", - "source", - "status", - "inference_framework" - ], - "title": "GetLLMModelEndpointV1Response" - }, - "GetModelEndpointV1Response": { - "properties": { - "id": { - "type": "string", - "title": "Id" - }, - "name": { - "type": "string", - "title": "Name" - }, - "endpoint_type": { - "$ref": "#/components/schemas/ModelEndpointType" - }, - "destination": { - "type": "string", - "title": "Destination" - }, - "deployment_name": { - "title": "Deployment Name", - "type": "string", - "nullable": true - }, - "metadata": { - "title": "Metadata", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "bundle_name": { - "type": "string", - "title": "Bundle Name" - }, - "status": { - "$ref": "#/components/schemas/ModelEndpointStatus" - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "labels": { - "title": "Labels", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "aws_role": { - "title": "Aws Role", - "type": "string", - "nullable": true - }, - "results_s3_bucket": { - "title": "Results S3 Bucket", - "type": "string", - "nullable": true - }, - "created_by": { - "type": "string", - "title": "Created By" - }, - "created_at": { - "type": "string", - "format": "date-time", - "title": "Created At" - }, - "last_updated_at": { - "type": "string", - "format": "date-time", - "title": "Last Updated At" - }, - "deployment_state": { - "$ref": 
"#/components/schemas/ModelEndpointDeploymentState", - "nullable": true - }, - "resource_state": { - "$ref": "#/components/schemas/ModelEndpointResourceState", - "nullable": true - }, - "num_queued_items": { - "title": "Num Queued Items", - "type": "integer", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "id", - "name", - "endpoint_type", - "destination", - "bundle_name", - "status", - "created_by", - "created_at", - "last_updated_at" - ], - "title": "GetModelEndpointV1Response" - }, - "GetTriggerV1Response": { - "properties": { - "id": { - "type": "string", - "title": "Id" - }, - "name": { - "type": "string", - "title": "Name" - }, - "owner": { - "type": "string", - "title": "Owner" - }, - "created_by": { - "type": "string", - "title": "Created By" - }, - "created_at": { - "type": "string", - "format": "date-time", - "title": "Created At" - }, - "cron_schedule": { - "type": "string", - "title": "Cron Schedule" - }, - "docker_image_batch_job_bundle_id": { - "type": "string", - "title": "Docker Image Batch Job Bundle Id" - }, - "default_job_config": { - "title": "Default Job Config", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_job_metadata": { - "title": "Default Job Metadata", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - } - }, - "type": "object", - "required": [ - "id", - "name", - "owner", - "created_by", - "created_at", - "cron_schedule", - "docker_image_batch_job_bundle_id" - ], - "title": "GetTriggerV1Response" - }, - "GpuType": { - "type": "string", - "enum": [ - "nvidia-tesla-t4", - "nvidia-ampere-a10", - "nvidia-ampere-a100", - "nvidia-ampere-a100e", - "nvidia-hopper-h100", - "nvidia-hopper-h100-1g20gb", - "nvidia-hopper-h100-3g40gb" - ], - "title": "GpuType", - "description": "Lists allowed GPU types for Launch." 
- }, - "HTTPValidationError": { - "properties": { - "detail": { - "items": { - "$ref": "#/components/schemas/ValidationError" - }, - "type": "array", - "title": "Detail" - } - }, - "type": "object", - "title": "HTTPValidationError" - }, - "ImageUrl": { - "properties": { - "url": { - "type": "string", - "maxLength": 65536, - "minLength": 1, - "format": "uri", - "title": "Url", - "description": "Either a URL of the image or the base64 encoded image data.", - "example": "http://www.example.com/" - }, - "detail": { - "type": "string", - "enum": [ - "auto", - "low", - "high" - ], - "title": "Detail", - "description": "Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).", - "default": "auto" - } - }, - "type": "object", - "required": [ - "url" - ], - "title": "ImageUrl" - }, - "InputAudio": { - "properties": { - "data": { - "type": "string", - "title": "Data", - "description": "Base64 encoded audio data." - }, - "format": { - "type": "string", - "enum": [ - "wav", - "mp3" - ], - "title": "Format", - "description": "The format of the encoded audio data. Currently supports \"wav\" and \"mp3\".\n" - } - }, - "type": "object", - "required": [ - "data", - "format" - ], - "title": "InputAudio" - }, - "JsonSchema": { - "properties": { - "description": { - "title": "Description", - "description": "A description of what the response format is for, used by the model to\ndetermine how to respond in the format.\n", - "type": "string", - "nullable": true - }, - "name": { - "type": "string", - "title": "Name", - "description": "The name of the response format. 
Must be a-z, A-Z, 0-9, or contain\nunderscores and dashes, with a maximum length of 64.\n" - }, - "schema": { - "$ref": "#/components/schemas/ResponseFormatJsonSchemaSchema", - "nullable": true - }, - "strict": { - "title": "Strict", - "description": "Whether to enable strict schema adherence when generating the output.\nIf set to true, the model will always follow the exact schema defined\nin the `schema` field. Only a subset of JSON Schema is supported when\n`strict` is `true`. To learn more, read the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n", - "default": false, - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "name" - ], - "title": "JsonSchema" - }, - "LLMFineTuneEvent": { - "properties": { - "timestamp": { - "title": "Timestamp", - "type": "number", - "nullable": true - }, - "message": { - "type": "string", - "title": "Message" - }, - "level": { - "type": "string", - "title": "Level" - } - }, - "type": "object", - "required": [ - "message", - "level" - ], - "title": "LLMFineTuneEvent" - }, - "LLMInferenceFramework": { - "type": "string", - "enum": [ - "deepspeed", - "text_generation_inference", - "vllm", - "lightllm", - "tensorrt_llm", - "sglang" - ], - "title": "LLMInferenceFramework" - }, - "LLMSource": { - "type": "string", - "enum": [ - "hugging_face" - ], - "title": "LLMSource" - }, - "ListDockerImageBatchJobBundleV1Response": { - "properties": { - "docker_image_batch_job_bundles": { - "items": { - "$ref": "#/components/schemas/DockerImageBatchJobBundleV1Response" - }, - "type": "array", - "title": "Docker Image Batch Job Bundles" - } - }, - "type": "object", - "required": [ - "docker_image_batch_job_bundles" - ], - "title": "ListDockerImageBatchJobBundleV1Response" - }, - "ListDockerImageBatchJobsV1Response": { - "properties": { - "jobs": { - "items": { - "$ref": "#/components/schemas/DockerImageBatchJob" - }, - "type": "array", - "title": "Jobs" - } - }, - "type": "object", - "required": [ - 
"jobs" - ], - "title": "ListDockerImageBatchJobsV1Response" - }, - "ListFilesResponse": { - "properties": { - "files": { - "items": { - "$ref": "#/components/schemas/GetFileResponse" - }, - "type": "array", - "title": "Files", - "description": "List of file IDs, names, and sizes." - } - }, - "type": "object", - "required": [ - "files" - ], - "title": "ListFilesResponse", - "description": "Response object for listing files." - }, - "ListFineTunesResponse": { - "properties": { - "jobs": { - "items": { - "$ref": "#/components/schemas/GetFineTuneResponse" - }, - "type": "array", - "title": "Jobs" - } - }, - "type": "object", - "required": [ - "jobs" - ], - "title": "ListFineTunesResponse" - }, - "ListLLMModelEndpointsV1Response": { - "properties": { - "model_endpoints": { - "items": { - "$ref": "#/components/schemas/GetLLMModelEndpointV1Response" - }, - "type": "array", - "title": "Model Endpoints" - } - }, - "type": "object", - "required": [ - "model_endpoints" - ], - "title": "ListLLMModelEndpointsV1Response" - }, - "ListModelBundlesV1Response": { - "properties": { - "model_bundles": { - "items": { - "$ref": "#/components/schemas/ModelBundleV1Response" - }, - "type": "array", - "title": "Model Bundles" - } - }, - "type": "object", - "required": [ - "model_bundles" - ], - "title": "ListModelBundlesV1Response", - "description": "Response object for listing Model Bundles." - }, - "ListModelBundlesV2Response": { - "properties": { - "model_bundles": { - "items": { - "$ref": "#/components/schemas/ModelBundleV2Response" - }, - "type": "array", - "title": "Model Bundles" - } - }, - "type": "object", - "required": [ - "model_bundles" - ], - "title": "ListModelBundlesV2Response", - "description": "Response object for listing Model Bundles." 
- }, - "ListModelEndpointsV1Response": { - "properties": { - "model_endpoints": { - "items": { - "$ref": "#/components/schemas/GetModelEndpointV1Response" - }, - "type": "array", - "title": "Model Endpoints" - } - }, - "type": "object", - "required": [ - "model_endpoints" - ], - "title": "ListModelEndpointsV1Response" - }, - "ListTriggersV1Response": { - "properties": { - "triggers": { - "items": { - "$ref": "#/components/schemas/GetTriggerV1Response" - }, - "type": "array", - "title": "Triggers" - } - }, - "type": "object", - "required": [ - "triggers" - ], - "title": "ListTriggersV1Response" - }, - "Logprobs": { - "properties": { - "content": { - "title": "Content", - "description": "A list of message content tokens with log probability information.", - "items": { - "$ref": "#/components/schemas/ChatCompletionTokenLogprob" - }, - "type": "array", - "nullable": true - }, - "refusal": { - "title": "Refusal", - "description": "A list of message refusal tokens with log probability information.", - "items": { - "$ref": "#/components/schemas/ChatCompletionTokenLogprob" - }, - "type": "array", - "nullable": true - } - }, - "type": "object", - "required": [ - "content", - "refusal" - ], - "title": "Logprobs" - }, - "Logprobs2": { - "properties": { - "text_offset": { - "title": "Text Offset", - "items": { - "type": "integer" - }, - "type": "array", - "nullable": true - }, - "token_logprobs": { - "title": "Token Logprobs", - "items": { - "type": "number" - }, - "type": "array", - "nullable": true - }, - "tokens": { - "title": "Tokens", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "top_logprobs": { - "title": "Top Logprobs", - "items": { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - "type": "array", - "nullable": true - } - }, - "type": "object", - "title": "Logprobs2" - }, - "Metadata": { - "title": "Metadata", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, 
- "ModelBundleEnvironmentParams": { - "properties": { - "framework_type": { - "$ref": "#/components/schemas/ModelBundleFrameworkType" - }, - "pytorch_image_tag": { - "title": "Pytorch Image Tag", - "type": "string", - "nullable": true - }, - "tensorflow_version": { - "title": "Tensorflow Version", - "type": "string", - "nullable": true - }, - "ecr_repo": { - "title": "Ecr Repo", - "type": "string", - "nullable": true - }, - "image_tag": { - "title": "Image Tag", - "type": "string", - "nullable": true - } - }, - "type": "object", - "required": [ - "framework_type" - ], - "title": "ModelBundleEnvironmentParams", - "description": "This is the entity-layer class for the Model Bundle environment parameters. Being an\nentity-layer class, it should be a plain data object." - }, - "ModelBundleFrameworkType": { - "type": "string", - "enum": [ - "pytorch", - "tensorflow", - "custom_base_image" - ], - "title": "ModelBundleFrameworkType", - "description": "The canonical list of possible machine learning frameworks of Model Bundles." - }, - "ModelBundleOrderBy": { - "type": "string", - "enum": [ - "newest", - "oldest" - ], - "title": "ModelBundleOrderBy", - "description": "The canonical list of possible orderings of Model Bundles." - }, - "ModelBundlePackagingType": { - "type": "string", - "enum": [ - "cloudpickle", - "zip", - "lira" - ], - "title": "ModelBundlePackagingType", - "description": "The canonical list of possible packaging types for Model Bundles.\n\nThese values broadly determine how the model endpoint will obtain its code & dependencies." 
- }, - "ModelBundleV1Response": { - "properties": { - "id": { - "type": "string", - "title": "Id" - }, - "name": { - "type": "string", - "title": "Name" - }, - "location": { - "type": "string", - "title": "Location" - }, - "requirements": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Requirements" - }, - "env_params": { - "$ref": "#/components/schemas/ModelBundleEnvironmentParams" - }, - "packaging_type": { - "$ref": "#/components/schemas/ModelBundlePackagingType" - }, - "metadata": { - "additionalProperties": true, - "type": "object", - "title": "Metadata" - }, - "app_config": { - "title": "App Config", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "created_at": { - "type": "string", - "format": "date-time", - "title": "Created At" - }, - "model_artifact_ids": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Model Artifact Ids" - }, - "schema_location": { - "title": "Schema Location", - "type": "string", - "nullable": true - } - }, - "type": "object", - "required": [ - "id", - "name", - "location", - "requirements", - "env_params", - "packaging_type", - "metadata", - "created_at", - "model_artifact_ids" - ], - "title": "ModelBundleV1Response", - "description": "Response object for a single Model Bundle." 
- }, - "ModelBundleV2Response": { - "properties": { - "id": { - "type": "string", - "title": "Id" - }, - "name": { - "type": "string", - "title": "Name" - }, - "metadata": { - "additionalProperties": true, - "type": "object", - "title": "Metadata" - }, - "created_at": { - "type": "string", - "format": "date-time", - "title": "Created At" - }, - "model_artifact_ids": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Model Artifact Ids" - }, - "schema_location": { - "title": "Schema Location", - "type": "string", - "nullable": true - }, - "flavor": { - "oneOf": [ - { - "$ref": "#/components/schemas/CloudpickleArtifactFlavor" - }, - { - "$ref": "#/components/schemas/ZipArtifactFlavor" - }, - { - "$ref": "#/components/schemas/RunnableImageFlavor" - }, - { - "$ref": "#/components/schemas/StreamingEnhancedRunnableImageFlavor" - }, - { - "$ref": "#/components/schemas/TritonEnhancedRunnableImageFlavor" - } - ], - "title": "Flavor", - "discriminator": { - "propertyName": "flavor", - "mapping": { - "cloudpickle_artifact": "#/components/schemas/CloudpickleArtifactFlavor", - "runnable_image": "#/components/schemas/RunnableImageFlavor", - "streaming_enhanced_runnable_image": "#/components/schemas/StreamingEnhancedRunnableImageFlavor", - "triton_enhanced_runnable_image": "#/components/schemas/TritonEnhancedRunnableImageFlavor", - "zip_artifact": "#/components/schemas/ZipArtifactFlavor" - } - } - } - }, - "type": "object", - "required": [ - "id", - "name", - "metadata", - "created_at", - "model_artifact_ids", - "flavor" - ], - "title": "ModelBundleV2Response", - "description": "Response object for a single Model Bundle." - }, - "ModelDownloadRequest": { - "properties": { - "model_name": { - "type": "string", - "title": "Model Name", - "description": "Name of the fine tuned model" - }, - "download_format": { - "title": "Download Format", - "description": "Format that you want the downloaded urls to be compatible with. 
Currently only supports hugging_face", - "default": "hugging_face", - "type": "string", - "nullable": true - } - }, - "type": "object", - "required": [ - "model_name" - ], - "title": "ModelDownloadRequest" - }, - "ModelDownloadResponse": { - "properties": { - "urls": { - "additionalProperties": { - "type": "string" - }, - "type": "object", - "title": "Urls", - "description": "Dictionary of (file_name, url) pairs to download the model from." - } - }, - "type": "object", - "required": [ - "urls" - ], - "title": "ModelDownloadResponse" - }, - "ModelEndpointDeploymentState": { - "properties": { - "min_workers": { - "type": "integer", - "minimum": 0.0, - "title": "Min Workers" - }, - "max_workers": { - "type": "integer", - "minimum": 0.0, - "title": "Max Workers" - }, - "per_worker": { - "type": "integer", - "exclusiveMinimum": 0.0, - "title": "Per Worker" - }, - "concurrent_requests_per_worker": { - "type": "integer", - "exclusiveMinimum": 0.0, - "title": "Concurrent Requests Per Worker" - }, - "available_workers": { - "title": "Available Workers", - "type": "integer", - "minimum": 0.0, - "nullable": true - }, - "unavailable_workers": { - "title": "Unavailable Workers", - "type": "integer", - "minimum": 0.0, - "nullable": true - } - }, - "type": "object", - "required": [ - "min_workers", - "max_workers", - "per_worker", - "concurrent_requests_per_worker" - ], - "title": "ModelEndpointDeploymentState", - "description": "This is the entity-layer class for the deployment settings related to a Model Endpoint." - }, - "ModelEndpointOrderBy": { - "type": "string", - "enum": [ - "newest", - "oldest", - "alphabetical" - ], - "title": "ModelEndpointOrderBy", - "description": "The canonical list of possible orderings of Model Bundles." 
- }, - "ModelEndpointResourceState": { - "properties": { - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus" - }, - "gpus": { - "type": "integer", - "minimum": 0.0, - "title": "Gpus" - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory" - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "type": "integer", - "minimum": 1.0, - "title": "Nodes Per Worker" - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "cpus", - "gpus", - "memory", - "nodes_per_worker" - ], - "title": "ModelEndpointResourceState", - "description": "This is the entity-layer class for the resource settings per worker of a Model Endpoint.\nNote: in the multinode case, there are multiple \"nodes\" per \"worker\".\n\"Nodes\" is analogous to a single k8s pod that may take up all the GPUs on a single machine.\n\"Workers\" is the smallest unit that a request can be made to, and consists of one leader \"node\" and\nmultiple follower \"nodes\" (named \"worker\" in the k8s LeaderWorkerSet definition).\ncpus/gpus/memory/storage are per-node, thus the total consumption by a \"worker\"\nis cpus/gpus/etc. multiplied by nodes_per_worker." 
- }, - "ModelEndpointStatus": { - "type": "string", - "enum": [ - "READY", - "UPDATE_PENDING", - "UPDATE_IN_PROGRESS", - "UPDATE_FAILED", - "DELETE_IN_PROGRESS" - ], - "title": "ModelEndpointStatus" - }, - "ModelEndpointType": { - "type": "string", - "enum": [ - "async", - "sync", - "streaming" - ], - "title": "ModelEndpointType" - }, - "ParallelToolCalls": { - "type": "boolean", - "title": "ParallelToolCalls", - "description": "Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use." - }, - "PredictionContent": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of the predicted content you want to provide. This type is\ncurrently always `content`.\n", - "enum": [ - "content" - ] - }, - "content": { - "anyOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/Content8" - } - ], - "title": "Content", - "description": "The content that should be matched when generating a model response.\nIf generated tokens would match this content, the entire model response\ncan be returned much more quickly.\n" - } - }, - "type": "object", - "required": [ - "type", - "content" - ], - "title": "PredictionContent" - }, - "Prompt": { - "title": "Prompt", - "description": "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n", - "items": { - "type": "integer" - }, - "type": "array", - "minItems": 1, - "nullable": true - }, - "Prompt1": { - "title": "Prompt1", - "description": "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so 
if a prompt is not specified the model will generate as if from the beginning of a new document.\n", - "items": { - "$ref": "#/components/schemas/Prompt1Item" - }, - "type": "array", - "minItems": 1, - "nullable": true - }, - "Prompt1Item": { - "items": { - "type": "integer" - }, - "type": "array", - "minItems": 1, - "title": "Prompt1Item" - }, - "PromptTokensDetails": { - "properties": { - "audio_tokens": { - "type": "integer", - "title": "Audio Tokens", - "description": "Audio input tokens present in the prompt.", - "default": 0 - }, - "cached_tokens": { - "type": "integer", - "title": "Cached Tokens", - "description": "Cached tokens present in the prompt.", - "default": 0 - } - }, - "type": "object", - "title": "PromptTokensDetails" - }, - "PytorchFramework": { - "properties": { - "framework_type": { - "type": "string", - "title": "Framework Type", - "enum": [ - "pytorch" - ] - }, - "pytorch_image_tag": { - "type": "string", - "title": "Pytorch Image Tag" - } - }, - "type": "object", - "required": [ - "framework_type", - "pytorch_image_tag" - ], - "title": "PytorchFramework", - "description": "This is the entity-layer class for a Pytorch framework specification." - }, - "Quantization": { - "type": "string", - "enum": [ - "bitsandbytes", - "awq" - ], - "title": "Quantization" - }, - "ReasoningEffort": { - "title": "ReasoningEffort", - "description": "**o-series models only** \n\nConstrains effort on reasoning for \n[reasoning models](https://platform.openai.com/docs/guides/reasoning).\nCurrently supported values are `low`, `medium`, and `high`. 
Reducing\nreasoning effort can result in faster responses and fewer tokens used\non reasoning in a response.\n", - "default": "medium", - "type": "string", - "enum": [ - "low", - "medium", - "high" - ], - "nullable": true - }, - "RequestSchema": { - "title": "RequestSchema" - }, - "ResponseFormatJsonObject": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of response format being defined. Always `json_object`.", - "enum": [ - "json_object" - ] - } - }, - "type": "object", - "required": [ - "type" - ], - "title": "ResponseFormatJsonObject" - }, - "ResponseFormatJsonSchema": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of response format being defined. Always `json_schema`.", - "enum": [ - "json_schema" - ] - }, - "json_schema": { - "$ref": "#/components/schemas/JsonSchema", - "title": "JSON schema", - "description": "Structured Outputs configuration options, including a JSON Schema.\n" - } - }, - "type": "object", - "required": [ - "type", - "json_schema" - ], - "title": "ResponseFormatJsonSchema" - }, - "ResponseFormatJsonSchemaSchema": { - "properties": {}, - "additionalProperties": true, - "type": "object", - "title": "ResponseFormatJsonSchemaSchema" - }, - "ResponseFormatText": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of response format being defined. Always `text`.", - "enum": [ - "text" - ] - } - }, - "type": "object", - "required": [ - "type" - ], - "title": "ResponseFormatText" - }, - "ResponseModalities": { - "title": "ResponseModalities", - "description": "Output types that you would like the model to generate.\nMost models are capable of generating text, which is the default:\n\n`[\"text\"]`\n\nThe `gpt-4o-audio-preview` model can also be used to \n[generate audio](/docs/guides/audio). 
To request that this model generate \nboth text and audio responses, you can use:\n\n`[\"text\", \"audio\"]`\n", - "items": { - "type": "string", - "enum": [ - "text", - "audio" - ] - }, - "type": "array", - "nullable": true - }, - "ResponseSchema": { - "title": "ResponseSchema" - }, - "RestartModelEndpointV1Response": { - "properties": { - "restarted": { - "type": "boolean", - "title": "Restarted" - } - }, - "type": "object", - "required": [ - "restarted" - ], - "title": "RestartModelEndpointV1Response" - }, - "RunnableImageFlavor": { - "properties": { - "repository": { - "type": "string", - "title": "Repository" - }, - "tag": { - "type": "string", - "title": "Tag" - }, - "command": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Command" - }, - "predict_route": { - "type": "string", - "title": "Predict Route", - "default": "/predict" - }, - "healthcheck_route": { - "type": "string", - "title": "Healthcheck Route", - "default": "/readyz" - }, - "env": { - "title": "Env", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "protocol": { - "type": "string", - "title": "Protocol", - "enum": [ - "http" - ] - }, - "readiness_initial_delay_seconds": { - "type": "integer", - "title": "Readiness Initial Delay Seconds", - "default": 120 - }, - "extra_routes": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Extra Routes" - }, - "routes": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Routes" - }, - "forwarder_type": { - "title": "Forwarder Type", - "default": "default", - "type": "string", - "nullable": true - }, - "worker_command": { - "title": "Worker Command", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "worker_env": { - "title": "Worker Env", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "flavor": { - "type": "string", - "title": "Flavor", - "enum": [ - "runnable_image" 
- ] - } - }, - "type": "object", - "required": [ - "repository", - "tag", - "command", - "protocol", - "flavor" - ], - "title": "RunnableImageFlavor", - "description": "This is the entity-layer class for the Model Bundle flavor of a runnable image." - }, - "ServiceTier": { - "title": "ServiceTier", - "description": "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', and the Project is Scale tier enabled, the system\n will utilize scale tier credits until they are exhausted.\n - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing).\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n", - "default": "auto", - "type": "string", - "enum": [ - "auto", - "default", - "flex" - ], - "nullable": true - }, - "StopConfiguration": { - "anyOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/StopConfiguration1" - } - ], - "title": "StopConfiguration", - "description": "Not supported with latest reasoning models `o3` and `o4-mini`.\n\nUp to 4 sequences where the API will stop generating further tokens. The\nreturned text will not contain the stop sequence.\n", - "nullable": true - }, - "StopConfiguration1": { - "title": "StopConfiguration1", - "description": "Not supported with latest reasoning models `o3` and `o4-mini`.\n\nUp to 4 sequences where the API will stop generating further tokens. 
The\nreturned text will not contain the stop sequence.\n", - "items": { - "type": "string" - }, - "type": "array", - "maxItems": 4, - "minItems": 1, - "nullable": true - }, - "StreamError": { - "properties": { - "status_code": { - "type": "integer", - "title": "Status Code" - }, - "content": { - "$ref": "#/components/schemas/StreamErrorContent" - } - }, - "type": "object", - "required": [ - "status_code", - "content" - ], - "title": "StreamError", - "description": "Error object for a stream prompt completion task." - }, - "StreamErrorContent": { - "properties": { - "error": { - "type": "string", - "title": "Error" - }, - "timestamp": { - "type": "string", - "title": "Timestamp" - } - }, - "type": "object", - "required": [ - "error", - "timestamp" - ], - "title": "StreamErrorContent" - }, - "StreamingEnhancedRunnableImageFlavor": { - "properties": { - "repository": { - "type": "string", - "title": "Repository" - }, - "tag": { - "type": "string", - "title": "Tag" - }, - "command": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Command", - "default": [] - }, - "predict_route": { - "type": "string", - "title": "Predict Route", - "default": "/predict" - }, - "healthcheck_route": { - "type": "string", - "title": "Healthcheck Route", - "default": "/readyz" - }, - "env": { - "title": "Env", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "protocol": { - "type": "string", - "title": "Protocol", - "enum": [ - "http" - ] - }, - "readiness_initial_delay_seconds": { - "type": "integer", - "title": "Readiness Initial Delay Seconds", - "default": 120 - }, - "extra_routes": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Extra Routes" - }, - "routes": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Routes" - }, - "forwarder_type": { - "title": "Forwarder Type", - "default": "default", - "type": "string", - "nullable": true - }, - "worker_command": { - "title": 
"Worker Command", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "worker_env": { - "title": "Worker Env", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "flavor": { - "type": "string", - "title": "Flavor", - "enum": [ - "streaming_enhanced_runnable_image" - ] - }, - "streaming_command": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Streaming Command" - }, - "streaming_predict_route": { - "type": "string", - "title": "Streaming Predict Route", - "default": "/stream" - } - }, - "type": "object", - "required": [ - "repository", - "tag", - "protocol", - "flavor", - "streaming_command" - ], - "title": "StreamingEnhancedRunnableImageFlavor", - "description": "For deployments that expose a streaming route in a container." - }, - "SyncEndpointPredictV1Request": { - "properties": { - "url": { - "title": "Url", - "type": "string", - "nullable": true - }, - "args": { - "$ref": "#/components/schemas/RequestSchema", - "nullable": true - }, - "cloudpickle": { - "title": "Cloudpickle", - "type": "string", - "nullable": true - }, - "callback_url": { - "title": "Callback Url", - "type": "string", - "nullable": true - }, - "callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "return_pickled": { - "type": "boolean", - "title": "Return Pickled", - "default": false - }, - "destination_path": { - "title": "Destination Path", - "type": "string", - "nullable": true - }, - "timeout_seconds": { - "title": "Timeout Seconds", - "type": "number", - "exclusiveMinimum": 0.0, - "nullable": true - }, - "num_retries": { - "title": "Num Retries", - "type": "integer", - "minimum": 0.0, - "nullable": true - } - }, - "type": "object", - "title": "SyncEndpointPredictV1Request" - }, - "SyncEndpointPredictV1Response": { - "properties": { - "status": { - "$ref": "#/components/schemas/TaskStatus" - }, - "result": { - "title": "Result", - "nullable": true - }, - 
"traceback": { - "title": "Traceback", - "type": "string", - "nullable": true - }, - "status_code": { - "title": "Status Code", - "type": "integer", - "nullable": true - } - }, - "type": "object", - "required": [ - "status" - ], - "title": "SyncEndpointPredictV1Response" - }, - "TaskStatus": { - "type": "string", - "enum": [ - "PENDING", - "STARTED", - "SUCCESS", - "FAILURE", - "UNDEFINED" - ], - "title": "TaskStatus" - }, - "TensorflowFramework": { - "properties": { - "framework_type": { - "type": "string", - "title": "Framework Type", - "enum": [ - "tensorflow" - ] - }, - "tensorflow_version": { - "type": "string", - "title": "Tensorflow Version" - } - }, - "type": "object", - "required": [ - "framework_type", - "tensorflow_version" - ], - "title": "TensorflowFramework", - "description": "This is the entity-layer class for a Tensorflow framework specification." - }, - "TokenOutput": { - "properties": { - "token": { - "type": "string", - "title": "Token" - }, - "log_prob": { - "type": "number", - "title": "Log Prob" - } - }, - "type": "object", - "required": [ - "token", - "log_prob" - ], - "title": "TokenOutput", - "description": "Detailed token information." - }, - "ToolConfig": { - "properties": { - "name": { - "type": "string", - "title": "Name" - }, - "max_iterations": { - "title": "Max Iterations", - "default": 10, - "type": "integer", - "nullable": true - }, - "execution_timeout_seconds": { - "title": "Execution Timeout Seconds", - "default": 60, - "type": "integer", - "nullable": true - }, - "should_retry_on_error": { - "title": "Should Retry On Error", - "default": true, - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "required": [ - "name" - ], - "title": "ToolConfig", - "description": "Configuration for tool use.\nNOTE: this config is highly experimental and signature will change significantly in future iterations." 
- }, - "TopLogprob": { - "properties": { - "token": { - "type": "string", - "title": "Token", - "description": "The token." - }, - "logprob": { - "type": "number", - "title": "Logprob", - "description": "The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely." - }, - "bytes": { - "title": "Bytes", - "description": "A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.", - "items": { - "type": "integer" - }, - "type": "array", - "nullable": true - } - }, - "type": "object", - "required": [ - "token", - "logprob", - "bytes" - ], - "title": "TopLogprob" - }, - "TritonEnhancedRunnableImageFlavor": { - "properties": { - "repository": { - "type": "string", - "title": "Repository" - }, - "tag": { - "type": "string", - "title": "Tag" - }, - "command": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Command" - }, - "predict_route": { - "type": "string", - "title": "Predict Route", - "default": "/predict" - }, - "healthcheck_route": { - "type": "string", - "title": "Healthcheck Route", - "default": "/readyz" - }, - "env": { - "title": "Env", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "protocol": { - "type": "string", - "title": "Protocol", - "enum": [ - "http" - ] - }, - "readiness_initial_delay_seconds": { - "type": "integer", - "title": "Readiness Initial Delay Seconds", - "default": 120 - }, - "extra_routes": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Extra Routes" - }, - "routes": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Routes" - }, - "forwarder_type": { - "title": "Forwarder Type", - 
"default": "default", - "type": "string", - "nullable": true - }, - "worker_command": { - "title": "Worker Command", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "worker_env": { - "title": "Worker Env", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "flavor": { - "type": "string", - "title": "Flavor", - "enum": [ - "triton_enhanced_runnable_image" - ] - }, - "triton_model_repository": { - "type": "string", - "title": "Triton Model Repository" - }, - "triton_model_replicas": { - "title": "Triton Model Replicas", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "triton_num_cpu": { - "type": "number", - "title": "Triton Num Cpu" - }, - "triton_commit_tag": { - "type": "string", - "title": "Triton Commit Tag" - }, - "triton_storage": { - "title": "Triton Storage", - "type": "string", - "nullable": true - }, - "triton_memory": { - "title": "Triton Memory", - "type": "string", - "nullable": true - }, - "triton_readiness_initial_delay_seconds": { - "type": "integer", - "title": "Triton Readiness Initial Delay Seconds", - "default": 300 - } - }, - "type": "object", - "required": [ - "repository", - "tag", - "command", - "protocol", - "flavor", - "triton_model_repository", - "triton_num_cpu", - "triton_commit_tag" - ], - "title": "TritonEnhancedRunnableImageFlavor", - "description": "For deployments that require tritonserver running in a container." - }, - "UpdateBatchCompletionsV2Request": { - "properties": { - "job_id": { - "type": "string", - "title": "Job Id", - "description": "ID of the batch completions job" - }, - "priority": { - "title": "Priority", - "description": "Priority of the batch inference job. 
Default to None.", - "type": "string", - "nullable": true - } - }, - "type": "object", - "required": [ - "job_id" - ], - "title": "UpdateBatchCompletionsV2Request" - }, - "UpdateBatchCompletionsV2Response": { - "properties": { - "job_id": { - "type": "string", - "title": "Job Id" - }, - "input_data_path": { - "title": "Input Data Path", - "description": "Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].", - "type": "string", - "nullable": true - }, - "output_data_path": { - "type": "string", - "title": "Output Data Path", - "description": "Path to the output file. The output file will be a JSON file of type List[CompletionOutput]." - }, - "model_config": { - "$ref": "#/components/schemas/BatchCompletionsModelConfig", - "description": "Model configuration for the batch inference. Hardware configurations are inferred." - }, - "priority": { - "title": "Priority", - "description": "Priority of the batch inference job. Default to None.", - "type": "string", - "nullable": true - }, - "status": { - "$ref": "#/components/schemas/BatchCompletionsJobStatus" - }, - "created_at": { - "type": "string", - "title": "Created At" - }, - "expires_at": { - "type": "string", - "title": "Expires At" - }, - "completed_at": { - "title": "Completed At", - "type": "string", - "nullable": true - }, - "metadata": { - "title": "Metadata", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "success": { - "type": "boolean", - "title": "Success", - "description": "Whether the update was successful" - } - }, - "type": "object", - "required": [ - "job_id", - "output_data_path", - "model_config", - "status", - "created_at", - "expires_at", - "completed_at", - "metadata", - "success" - ], - "title": "UpdateBatchCompletionsV2Response" - }, - "UpdateBatchJobV1Request": { - "properties": { - "cancel": { - "type": "boolean", - "title": "Cancel" - } - }, - "type": "object", - "required": [ - 
"cancel" - ], - "title": "UpdateBatchJobV1Request" - }, - "UpdateBatchJobV1Response": { - "properties": { - "success": { - "type": "boolean", - "title": "Success" - } - }, - "type": "object", - "required": [ - "success" - ], - "title": "UpdateBatchJobV1Response" - }, - "UpdateDeepSpeedModelEndpointRequest": { - "properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - 
"public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "model_name": { - "title": "Model Name", - "type": "string", - "nullable": true - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "nullable": true - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "deepspeed", - "enum": [ - "deepspeed" - ] - }, - "inference_framework_image_tag": { - "title": "Inference Framework Image Tag", - "type": "string", - "nullable": true - }, - "num_shards": { - "title": "Num Shards", - "type": "integer", - "nullable": true - }, - "metadata": { - "title": "Metadata", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "force_bundle_recreation": { - "title": "Force Bundle Recreation", - "default": false, - "type": "boolean", - "nullable": true - }, - "min_workers": { - "title": "Min Workers", - "type": "integer", - "nullable": true - }, - "max_workers": { - "title": "Max Workers", - "type": "integer", - "nullable": true - }, - "per_worker": { - "title": "Per Worker", - "type": "integer", - "nullable": true - }, - "labels": { - "title": "Labels", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - } - }, - "type": "object", - "title": "UpdateDeepSpeedModelEndpointRequest" - }, - "UpdateDockerImageBatchJobV1Request": { - "properties": { - "cancel": { - "type": "boolean", - 
"title": "Cancel" - } - }, - "type": "object", - "required": [ - "cancel" - ], - "title": "UpdateDockerImageBatchJobV1Request" - }, - "UpdateDockerImageBatchJobV1Response": { - "properties": { - "success": { - "type": "boolean", - "title": "Success" - } - }, - "type": "object", - "required": [ - "success" - ], - "title": "UpdateDockerImageBatchJobV1Response" - }, - "UpdateLLMModelEndpointV1Response": { - "properties": { - "endpoint_creation_task_id": { - "type": "string", - "title": "Endpoint Creation Task Id" - } - }, - "type": "object", - "required": [ - "endpoint_creation_task_id" - ], - "title": "UpdateLLMModelEndpointV1Response" - }, - "UpdateModelEndpointV1Request": { - "properties": { - "model_bundle_id": { - "title": "Model Bundle Id", - "type": "string", - "nullable": true - }, - "metadata": { - "title": "Metadata", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "minimum": 0.0, - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "min_workers": { - "title": "Min Workers", - "type": "integer", - "minimum": 0.0, - "nullable": true - }, - "max_workers": { - "title": "Max Workers", - "type": "integer", - "minimum": 
0.0, - "nullable": true - }, - "per_worker": { - "title": "Per Worker", - "type": "integer", - "exclusiveMinimum": 0.0, - "nullable": true - }, - "concurrent_requests_per_worker": { - "title": "Concurrent Requests Per Worker", - "type": "integer", - "exclusiveMinimum": 0.0, - "nullable": true - }, - "labels": { - "title": "Labels", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "title": "UpdateModelEndpointV1Request" - }, - "UpdateModelEndpointV1Response": { - "properties": { - "endpoint_creation_task_id": { - "type": "string", - "title": "Endpoint Creation Task Id" - } - }, - "type": "object", - "required": [ - "endpoint_creation_task_id" - ], - "title": "UpdateModelEndpointV1Response" - }, - "UpdateSGLangModelEndpointRequest": { - "properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, 
- "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "model_name": { - "title": "Model Name", - "type": "string", - "nullable": true - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "nullable": true - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "sglang", - "enum": [ - "sglang" - ] - }, - "inference_framework_image_tag": { - "title": "Inference Framework Image Tag", - "type": "string", - "nullable": true - }, - "num_shards": { - "title": "Num Shards", - "type": "integer", - "nullable": true - }, - "metadata": { - "title": "Metadata", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "force_bundle_recreation": { - "title": "Force Bundle Recreation", - "default": false, - "type": "boolean", - "nullable": true - }, - "min_workers": { - "title": "Min Workers", - "type": "integer", - "nullable": true - }, - "max_workers": { - "title": "Max Workers", - "type": "integer", - "nullable": true - }, - "per_worker": { - "title": "Per Worker", - "type": "integer", - "nullable": true - }, - "labels": { - "title": "Labels", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "trust_remote_code": { - "title": "Trust Remote Code", - "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). 
Default to False.", - "default": false, - "type": "boolean", - "nullable": true - }, - "tp_size": { - "title": "Tp Size", - "description": "The tensor parallel size.", - "type": "integer", - "nullable": true - }, - "skip_tokenizer_init": { - "title": "Skip Tokenizer Init", - "description": "If set, skip init tokenizer and pass input_ids in generate request", - "type": "boolean", - "nullable": true - }, - "load_format": { - "title": "Load Format", - "description": "The format of the model weights to load.", - "type": "string", - "nullable": true - }, - "dtype": { - "title": "Dtype", - "description": "Data type for model weights and activations.", - "type": "string", - "nullable": true - }, - "kv_cache_dtype": { - "title": "Kv Cache Dtype", - "description": "Data type for kv cache storage. \"auto\" will use model data type.", - "type": "string", - "nullable": true - }, - "quantization_param_path": { - "title": "Quantization Param Path", - "description": "Path to the JSON file containing the KV cache scaling factors.", - "type": "string", - "nullable": true - }, - "quantization": { - "title": "Quantization", - "description": "The quantization method.", - "type": "string", - "nullable": true - }, - "context_length": { - "title": "Context Length", - "description": "The model's maximum context length.", - "type": "integer", - "nullable": true - }, - "device": { - "title": "Device", - "description": "The device type.", - "type": "string", - "nullable": true - }, - "served_model_name": { - "title": "Served Model Name", - "description": "Override the model name returned by the v1/models endpoint in OpenAI API server.", - "type": "string", - "nullable": true - }, - "chat_template": { - "title": "Chat Template", - "description": "The builtin chat template name or path of the chat template file.", - "type": "string", - "nullable": true - }, - "is_embedding": { - "title": "Is Embedding", - "description": "Whether to use a CausalLM as an embedding model.", - "type": "boolean", - 
"nullable": true - }, - "revision": { - "title": "Revision", - "description": "The specific model version to use.", - "type": "string", - "nullable": true - }, - "mem_fraction_static": { - "title": "Mem Fraction Static", - "description": "The fraction of the memory used for static allocation.", - "type": "number", - "nullable": true - }, - "max_running_requests": { - "title": "Max Running Requests", - "description": "The maximum number of running requests.", - "type": "integer", - "nullable": true - }, - "max_total_tokens": { - "title": "Max Total Tokens", - "description": "The maximum number of tokens in the memory pool.", - "type": "integer", - "nullable": true - }, - "chunked_prefill_size": { - "title": "Chunked Prefill Size", - "description": "The maximum number of tokens in a chunk for the chunked prefill.", - "type": "integer", - "nullable": true - }, - "max_prefill_tokens": { - "title": "Max Prefill Tokens", - "description": "The maximum number of tokens in a prefill batch.", - "type": "integer", - "nullable": true - }, - "schedule_policy": { - "title": "Schedule Policy", - "description": "The scheduling policy of the requests.", - "type": "string", - "nullable": true - }, - "schedule_conservativeness": { - "title": "Schedule Conservativeness", - "description": "How conservative the schedule policy is.", - "type": "number", - "nullable": true - }, - "cpu_offload_gb": { - "title": "Cpu Offload Gb", - "description": "How many GBs of RAM to reserve for CPU offloading", - "type": "integer", - "nullable": true - }, - "prefill_only_one_req": { - "title": "Prefill Only One Req", - "description": "If true, we only prefill one request at one prefill batch", - "type": "boolean", - "nullable": true - }, - "stream_interval": { - "title": "Stream Interval", - "description": "The interval for streaming in terms of the token length.", - "type": "integer", - "nullable": true - }, - "random_seed": { - "title": "Random Seed", - "description": "The random seed.", - "type": 
"integer", - "nullable": true - }, - "constrained_json_whitespace_pattern": { - "title": "Constrained Json Whitespace Pattern", - "description": "Regex pattern for syntactic whitespaces allowed in JSON constrained output.", - "type": "string", - "nullable": true - }, - "watchdog_timeout": { - "title": "Watchdog Timeout", - "description": "Set watchdog timeout in seconds.", - "type": "number", - "nullable": true - }, - "download_dir": { - "title": "Download Dir", - "description": "Model download directory.", - "type": "string", - "nullable": true - }, - "base_gpu_id": { - "title": "Base Gpu Id", - "description": "The base GPU ID to start allocating GPUs from.", - "type": "integer", - "nullable": true - }, - "log_level": { - "title": "Log Level", - "description": "The logging level of all loggers.", - "type": "string", - "nullable": true - }, - "log_level_http": { - "title": "Log Level Http", - "description": "The logging level of HTTP server.", - "type": "string", - "nullable": true - }, - "log_requests": { - "title": "Log Requests", - "description": "Log the inputs and outputs of all requests.", - "type": "boolean", - "nullable": true - }, - "show_time_cost": { - "title": "Show Time Cost", - "description": "Show time cost of custom marks.", - "type": "boolean", - "nullable": true - }, - "enable_metrics": { - "title": "Enable Metrics", - "description": "Enable log prometheus metrics.", - "type": "boolean", - "nullable": true - }, - "decode_log_interval": { - "title": "Decode Log Interval", - "description": "The log interval of decode batch.", - "type": "integer", - "nullable": true - }, - "api_key": { - "title": "Api Key", - "description": "Set API key of the server.", - "type": "string", - "nullable": true - }, - "file_storage_pth": { - "title": "File Storage Pth", - "description": "The path of the file storage in backend.", - "type": "string", - "nullable": true - }, - "enable_cache_report": { - "title": "Enable Cache Report", - "description": "Return number of 
cached tokens in usage.prompt_tokens_details.", - "type": "boolean", - "nullable": true - }, - "data_parallel_size": { - "title": "Data Parallel Size", - "description": "The data parallelism size.", - "type": "integer", - "nullable": true - }, - "load_balance_method": { - "title": "Load Balance Method", - "description": "The load balancing strategy for data parallelism.", - "type": "string", - "nullable": true - }, - "expert_parallel_size": { - "title": "Expert Parallel Size", - "description": "The expert parallelism size.", - "type": "integer", - "nullable": true - }, - "dist_init_addr": { - "title": "Dist Init Addr", - "description": "The host address for initializing distributed backend.", - "type": "string", - "nullable": true - }, - "nnodes": { - "title": "Nnodes", - "description": "The number of nodes.", - "type": "integer", - "nullable": true - }, - "node_rank": { - "title": "Node Rank", - "description": "The node rank.", - "type": "integer", - "nullable": true - }, - "json_model_override_args": { - "title": "Json Model Override Args", - "description": "A dictionary in JSON string format used to override default model configurations.", - "type": "string", - "nullable": true - }, - "lora_paths": { - "title": "Lora Paths", - "description": "The list of LoRA adapters.", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "max_loras_per_batch": { - "title": "Max Loras Per Batch", - "description": "Maximum number of adapters for a running batch.", - "type": "integer", - "nullable": true - }, - "attention_backend": { - "title": "Attention Backend", - "description": "Choose the kernels for attention layers.", - "type": "string", - "nullable": true - }, - "sampling_backend": { - "title": "Sampling Backend", - "description": "Choose the kernels for sampling layers.", - "type": "string", - "nullable": true - }, - "grammar_backend": { - "title": "Grammar Backend", - "description": "Choose the backend for grammar-guided decoding.", - 
"type": "string", - "nullable": true - }, - "speculative_algorithm": { - "title": "Speculative Algorithm", - "description": "Speculative algorithm.", - "type": "string", - "nullable": true - }, - "speculative_draft_model_path": { - "title": "Speculative Draft Model Path", - "description": "The path of the draft model weights.", - "type": "string", - "nullable": true - }, - "speculative_num_steps": { - "title": "Speculative Num Steps", - "description": "The number of steps sampled from draft model in Speculative Decoding.", - "type": "integer", - "nullable": true - }, - "speculative_num_draft_tokens": { - "title": "Speculative Num Draft Tokens", - "description": "The number of token sampled from draft model in Speculative Decoding.", - "type": "integer", - "nullable": true - }, - "speculative_eagle_topk": { - "title": "Speculative Eagle Topk", - "description": "The number of token sampled from draft model in eagle2 each step.", - "type": "integer", - "nullable": true - }, - "enable_double_sparsity": { - "title": "Enable Double Sparsity", - "description": "Enable double sparsity attention", - "type": "boolean", - "nullable": true - }, - "ds_channel_config_path": { - "title": "Ds Channel Config Path", - "description": "The path of the double sparsity channel config", - "type": "string", - "nullable": true - }, - "ds_heavy_channel_num": { - "title": "Ds Heavy Channel Num", - "description": "The number of heavy channels in double sparsity attention", - "type": "integer", - "nullable": true - }, - "ds_heavy_token_num": { - "title": "Ds Heavy Token Num", - "description": "The number of heavy tokens in double sparsity attention", - "type": "integer", - "nullable": true - }, - "ds_heavy_channel_type": { - "title": "Ds Heavy Channel Type", - "description": "The type of heavy channels in double sparsity attention", - "type": "string", - "nullable": true - }, - "ds_sparse_decode_threshold": { - "title": "Ds Sparse Decode Threshold", - "description": "The threshold for sparse 
decoding in double sparsity attention", - "type": "integer", - "nullable": true - }, - "disable_radix_cache": { - "title": "Disable Radix Cache", - "description": "Disable RadixAttention for prefix caching.", - "type": "boolean", - "nullable": true - }, - "disable_jump_forward": { - "title": "Disable Jump Forward", - "description": "Disable jump-forward for grammar-guided decoding.", - "type": "boolean", - "nullable": true - }, - "disable_cuda_graph": { - "title": "Disable Cuda Graph", - "description": "Disable cuda graph.", - "type": "boolean", - "nullable": true - }, - "disable_cuda_graph_padding": { - "title": "Disable Cuda Graph Padding", - "description": "Disable cuda graph when padding is needed.", - "type": "boolean", - "nullable": true - }, - "disable_outlines_disk_cache": { - "title": "Disable Outlines Disk Cache", - "description": "Disable disk cache of outlines.", - "type": "boolean", - "nullable": true - }, - "disable_custom_all_reduce": { - "title": "Disable Custom All Reduce", - "description": "Disable the custom all-reduce kernel.", - "type": "boolean", - "nullable": true - }, - "disable_mla": { - "title": "Disable Mla", - "description": "Disable Multi-head Latent Attention (MLA) for DeepSeek-V2.", - "type": "boolean", - "nullable": true - }, - "disable_overlap_schedule": { - "title": "Disable Overlap Schedule", - "description": "Disable the overlap scheduler.", - "type": "boolean", - "nullable": true - }, - "enable_mixed_chunk": { - "title": "Enable Mixed Chunk", - "description": "Enable mixing prefill and decode in a batch when using chunked prefill.", - "type": "boolean", - "nullable": true - }, - "enable_dp_attention": { - "title": "Enable Dp Attention", - "description": "Enable data parallelism for attention and tensor parallelism for FFN.", - "type": "boolean", - "nullable": true - }, - "enable_ep_moe": { - "title": "Enable Ep Moe", - "description": "Enable expert parallelism for moe.", - "type": "boolean", - "nullable": true - }, - 
"enable_torch_compile": { - "title": "Enable Torch Compile", - "description": "Optimize the model with torch.compile.", - "type": "boolean", - "nullable": true - }, - "torch_compile_max_bs": { - "title": "Torch Compile Max Bs", - "description": "Set the maximum batch size when using torch compile.", - "type": "integer", - "nullable": true - }, - "cuda_graph_max_bs": { - "title": "Cuda Graph Max Bs", - "description": "Set the maximum batch size for cuda graph.", - "type": "integer", - "nullable": true - }, - "cuda_graph_bs": { - "title": "Cuda Graph Bs", - "description": "Set the list of batch sizes for cuda graph.", - "items": { - "type": "integer" - }, - "type": "array", - "nullable": true - }, - "torchao_config": { - "title": "Torchao Config", - "description": "Optimize the model with torchao.", - "type": "string", - "nullable": true - }, - "enable_nan_detection": { - "title": "Enable Nan Detection", - "description": "Enable the NaN detection for debugging purposes.", - "type": "boolean", - "nullable": true - }, - "enable_p2p_check": { - "title": "Enable P2P Check", - "description": "Enable P2P check for GPU access.", - "type": "boolean", - "nullable": true - }, - "triton_attention_reduce_in_fp32": { - "title": "Triton Attention Reduce In Fp32", - "description": "Cast the intermediate attention results to fp32.", - "type": "boolean", - "nullable": true - }, - "triton_attention_num_kv_splits": { - "title": "Triton Attention Num Kv Splits", - "description": "The number of KV splits in flash decoding Triton kernel.", - "type": "integer", - "nullable": true - }, - "num_continuous_decode_steps": { - "title": "Num Continuous Decode Steps", - "description": "Run multiple continuous decoding steps to reduce scheduling overhead.", - "type": "integer", - "nullable": true - }, - "delete_ckpt_after_loading": { - "title": "Delete Ckpt After Loading", - "description": "Delete the model checkpoint after loading the model.", - "type": "boolean", - "nullable": true - }, - 
"enable_memory_saver": { - "title": "Enable Memory Saver", - "description": "Allow saving memory using release_memory_occupation and resume_memory_occupation", - "type": "boolean", - "nullable": true - }, - "allow_auto_truncate": { - "title": "Allow Auto Truncate", - "description": "Allow automatically truncating requests that exceed the maximum input length.", - "type": "boolean", - "nullable": true - }, - "enable_custom_logit_processor": { - "title": "Enable Custom Logit Processor", - "description": "Enable users to pass custom logit processors to the server.", - "type": "boolean", - "nullable": true - }, - "tool_call_parser": { - "title": "Tool Call Parser", - "description": "Specify the parser for handling tool-call interactions.", - "type": "string", - "nullable": true - }, - "huggingface_repo": { - "title": "Huggingface Repo", - "description": "The Hugging Face repository ID.", - "type": "string", - "nullable": true - } - }, - "type": "object", - "title": "UpdateSGLangModelEndpointRequest" - }, - "UpdateTextGenerationInferenceModelEndpointRequest": { - "properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": 
"number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "model_name": { - "title": "Model Name", - "type": "string", - "nullable": true - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "nullable": true - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "text_generation_inference", - "enum": [ - "text_generation_inference" - ] - }, - "inference_framework_image_tag": { - "title": "Inference Framework Image Tag", - "type": "string", - "nullable": true - }, - "num_shards": { - "title": "Num Shards", - "type": "integer", - "nullable": true - }, - "metadata": { - "title": "Metadata", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "force_bundle_recreation": { - "title": "Force Bundle Recreation", - "default": false, - "type": "boolean", - "nullable": true - }, - "min_workers": { - "title": "Min Workers", - "type": "integer", - "nullable": true - }, - "max_workers": { - "title": "Max Workers", - "type": "integer", - "nullable": true - }, - "per_worker": { - "title": "Per Worker", - "type": "integer", - "nullable": true - }, - "labels": { - "title": "Labels", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - } - }, - "type": "object", - "title": "UpdateTextGenerationInferenceModelEndpointRequest" - }, - "UpdateTriggerV1Request": { - "properties": { - "cron_schedule": { - "title": "Cron Schedule", - "type": "string", - "nullable": true - }, - "suspend": { - "title": "Suspend", - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "title": "UpdateTriggerV1Request" - }, - "UpdateTriggerV1Response": { - "properties": { - "success": { - "type": "boolean", - "title": "Success" - } - }, - "type": "object", - "required": [ - "success" - ], - "title": "UpdateTriggerV1Response" - }, - "UpdateVLLMModelEndpointRequest": { - 
"properties": { - "quantize": { - "$ref": "#/components/schemas/Quantization", - "nullable": true - }, - "checkpoint_path": { - "title": "Checkpoint Path", - "type": "string", - "nullable": true - }, - "post_inference_hooks": { - "title": "Post Inference Hooks", - "items": { - "type": "string" - }, - "type": "array", - "nullable": true - }, - "cpus": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Cpus", - "nullable": true - }, - "gpus": { - "title": "Gpus", - "type": "integer", - "nullable": true - }, - "memory": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Memory", - "nullable": true - }, - "gpu_type": { - "$ref": "#/components/schemas/GpuType", - "nullable": true - }, - "storage": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - } - ], - "title": "Storage", - "nullable": true - }, - "nodes_per_worker": { - "title": "Nodes Per Worker", - "type": "integer", - "nullable": true - }, - "optimize_costs": { - "title": "Optimize Costs", - "type": "boolean", - "nullable": true - }, - "prewarm": { - "title": "Prewarm", - "type": "boolean", - "nullable": true - }, - "high_priority": { - "title": "High Priority", - "type": "boolean", - "nullable": true - }, - "billing_tags": { - "title": "Billing Tags", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "default_callback_url": { - "title": "Default Callback Url", - "type": "string", - "nullable": true - }, - "default_callback_auth": { - "$ref": "#/components/schemas/CallbackAuth", - "nullable": true - }, - "public_inference": { - "title": "Public Inference", - "default": true, - "type": "boolean", - "nullable": true - }, - "chat_template_override": { - "title": "Chat Template Override", - "description": "A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "enable_startup_metrics": { - "title": "Enable Startup Metrics", - "description": "Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", - "default": false, - "type": "boolean", - "nullable": true - }, - "model_name": { - "title": "Model Name", - "type": "string", - "nullable": true - }, - "source": { - "$ref": "#/components/schemas/LLMSource", - "nullable": true - }, - "inference_framework": { - "type": "string", - "title": "Inference Framework", - "default": "vllm", - "enum": [ - "vllm" - ] - }, - "inference_framework_image_tag": { - "title": "Inference Framework Image Tag", - "type": "string", - "nullable": true - }, - "num_shards": { - "title": "Num Shards", - "type": "integer", - "nullable": true - }, - "metadata": { - "title": "Metadata", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "force_bundle_recreation": { - "title": "Force Bundle Recreation", - "default": false, - "type": "boolean", - "nullable": true - }, - "min_workers": { - "title": "Min Workers", - "type": "integer", - "nullable": true - }, - "max_workers": { - "title": "Max Workers", - "type": "integer", - "nullable": true - }, - "per_worker": { - "title": "Per Worker", - "type": "integer", - "nullable": true - }, - "labels": { - "title": "Labels", - "additionalProperties": { - "type": "string" - }, - "type": "object", - "nullable": true - }, - "max_gpu_memory_utilization": { - "title": "Max Gpu Memory Utilization", - "description": "Maximum GPU memory utilization for the batch inference. Default to 90%. Deprecated in favor of specifying this in VLLMModelConfig", - "type": "number", - "nullable": true - }, - "attention_backend": { - "title": "Attention Backend", - "description": "Attention backend to use for vLLM. 
Default to None.", - "type": "string", - "nullable": true - }, - "max_model_len": { - "title": "Max Model Len", - "description": "Model context length, If unspecified, will be automatically derived from the model config", - "type": "integer", - "nullable": true - }, - "max_num_seqs": { - "title": "Max Num Seqs", - "description": "Maximum number of sequences per iteration", - "type": "integer", - "nullable": true - }, - "enforce_eager": { - "title": "Enforce Eager", - "description": "Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal perforamnce and flexibility", - "type": "boolean", - "nullable": true - }, - "trust_remote_code": { - "title": "Trust Remote Code", - "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False.", - "default": false, - "type": "boolean", - "nullable": true - }, - "pipeline_parallel_size": { - "title": "Pipeline Parallel Size", - "description": "Number of pipeline stages. Default to None.", - "type": "integer", - "nullable": true - }, - "tensor_parallel_size": { - "title": "Tensor Parallel Size", - "description": "Number of tensor parallel replicas. Default to None.", - "type": "integer", - "nullable": true - }, - "quantization": { - "title": "Quantization", - "description": "Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.", - "type": "string", - "nullable": true - }, - "disable_log_requests": { - "title": "Disable Log Requests", - "description": "Disable logging requests. Default to None.", - "type": "boolean", - "nullable": true - }, - "chat_template": { - "title": "Chat Template", - "description": "A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint", - "type": "string", - "nullable": true - }, - "tool_call_parser": { - "title": "Tool Call Parser", - "description": "Tool call parser", - "type": "string", - "nullable": true - }, - "enable_auto_tool_choice": { - "title": "Enable Auto Tool Choice", - "description": "Enable auto tool choice", - "type": "boolean", - "nullable": true - }, - "load_format": { - "title": "Load Format", - "description": "The format of the model weights to load.\n\n* \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.\n* \"pt\" will load the weights in the pytorch bin format.\n* \"safetensors\" will load the weights in the safetensors format.\n* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading.\n* \"dummy\" will initialize the weights with random values, which is mainly for profiling.\n* \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information.\n* \"bitsandbytes\" will load the weights using bitsandbytes quantization.\n", - "type": "string", - "nullable": true - }, - "config_format": { - "title": "Config Format", - "description": "The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'.", - "type": "string", - "nullable": true - }, - "tokenizer_mode": { - "title": "Tokenizer Mode", - "description": "Tokenizer mode. 'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`.", - "type": "string", - "nullable": true - }, - "limit_mm_per_prompt": { - "title": "Limit Mm Per Prompt", - "description": "Maximum number of data instances per modality per prompt. 
Only applicable for multimodal models.", - "type": "string", - "nullable": true - }, - "max_num_batched_tokens": { - "title": "Max Num Batched Tokens", - "description": "Maximum number of batched tokens per iteration", - "type": "integer", - "nullable": true - }, - "tokenizer": { - "title": "Tokenizer", - "description": "Name or path of the huggingface tokenizer to use.", - "type": "string", - "nullable": true - }, - "dtype": { - "title": "Dtype", - "description": "Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.", - "type": "string", - "nullable": true - }, - "seed": { - "title": "Seed", - "description": "Random seed for reproducibility.", - "type": "integer", - "nullable": true - }, - "revision": { - "title": "Revision", - "description": "The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "code_revision": { - "title": "Code Revision", - "description": "The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "rope_scaling": { - "title": "Rope Scaling", - "description": "Dictionary containing the scaling configuration for the RoPE embeddings. When using this flag, don't update `max_position_embeddings` to the expected new maximum.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "tokenizer_revision": { - "title": "Tokenizer Revision", - "description": "The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. 
If unspecified, will use the default version.", - "type": "string", - "nullable": true - }, - "quantization_param_path": { - "title": "Quantization Param Path", - "description": "Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm.", - "type": "string", - "nullable": true - }, - "max_seq_len_to_capture": { - "title": "Max Seq Len To Capture", - "description": "Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode.", - "type": "integer", - "nullable": true - }, - "disable_sliding_window": { - "title": "Disable Sliding Window", - "description": "Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored.", - "type": "boolean", - "nullable": true - }, - "skip_tokenizer_init": { - "title": "Skip Tokenizer Init", - "description": "If true, skip initialization of tokenizer and detokenizer.", - "type": "boolean", - "nullable": true - }, - "served_model_name": { - "title": "Served Model Name", - "description": "The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. 
If not specified, the model name will be the same as `model`.", - "type": "string", - "nullable": true - }, - "override_neuron_config": { - "title": "Override Neuron Config", - "description": "Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "mm_processor_kwargs": { - "title": "Mm Processor Kwargs", - "description": "Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor.", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "block_size": { - "title": "Block Size", - "description": "Size of a cache block in number of tokens.", - "type": "integer", - "nullable": true - }, - "gpu_memory_utilization": { - "title": "Gpu Memory Utilization", - "description": "Fraction of GPU memory to use for the vLLM execution.", - "type": "number", - "nullable": true - }, - "swap_space": { - "title": "Swap Space", - "description": "Size of the CPU swap space per GPU (in GiB).", - "type": "number", - "nullable": true - }, - "cache_dtype": { - "title": "Cache Dtype", - "description": "Data type for kv cache storage.", - "type": "string", - "nullable": true - }, - "num_gpu_blocks_override": { - "title": "Num Gpu Blocks Override", - "description": "Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None.", - "type": "integer", - "nullable": true - }, - "enable_prefix_caching": { - "title": "Enable Prefix Caching", - "description": "Enables automatic prefix caching.", - "type": "boolean", - "nullable": true - } - }, - "type": "object", - "title": "UpdateVLLMModelEndpointRequest" - }, - "UploadFileResponse": { - "properties": { - "id": { - "type": "string", - "title": "Id", - "description": "ID of the uploaded file." 
- } - }, - "type": "object", - "required": [ - "id" - ], - "title": "UploadFileResponse", - "description": "Response object for uploading a file." - }, - "UrlCitation": { - "properties": { - "end_index": { - "type": "integer", - "title": "End Index", - "description": "The index of the last character of the URL citation in the message." - }, - "start_index": { - "type": "integer", - "title": "Start Index", - "description": "The index of the first character of the URL citation in the message." - }, - "url": { - "type": "string", - "title": "Url", - "description": "The URL of the web resource." - }, - "title": { - "type": "string", - "title": "Title", - "description": "The title of the web resource." - } - }, - "type": "object", - "required": [ - "end_index", - "start_index", - "url", - "title" - ], - "title": "UrlCitation" - }, - "UserLocation": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "description": "The type of location approximation. Always `approximate`.\n", - "enum": [ - "approximate" - ] - }, - "approximate": { - "$ref": "#/components/schemas/WebSearchLocation" - } - }, - "type": "object", - "required": [ - "type", - "approximate" - ], - "title": "UserLocation" - }, - "ValidationError": { - "properties": { - "loc": { - "items": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - } - ] - }, - "type": "array", - "title": "Location" - }, - "msg": { - "type": "string", - "title": "Message" - }, - "type": { - "type": "string", - "title": "Error Type" - } - }, - "type": "object", - "required": [ - "loc", - "msg", - "type" - ], - "title": "ValidationError" - }, - "VoiceIdsShared": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "string", - "enum": [ - "alloy", - "ash", - "ballad", - "coral", - "echo", - "fable", - "onyx", - "nova", - "sage", - "shimmer", - "verse" - ] - } - ], - "title": "VoiceIdsShared" - }, - "WebSearchContextSize": { - "type": "string", - "enum": [ - "low", - "medium", - "high" - ], - 
"title": "WebSearchContextSize", - "description": "High level guidance for the amount of context window space to use for the \nsearch. One of `low`, `medium`, or `high`. `medium` is the default.\n" - }, - "WebSearchLocation": { - "properties": { - "country": { - "title": "Country", - "description": "The two-letter \n[ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,\ne.g. `US`.\n", - "type": "string", - "nullable": true - }, - "region": { - "title": "Region", - "description": "Free text input for the region of the user, e.g. `California`.\n", - "type": "string", - "nullable": true - }, - "city": { - "title": "City", - "description": "Free text input for the city of the user, e.g. `San Francisco`.\n", - "type": "string", - "nullable": true - }, - "timezone": { - "title": "Timezone", - "description": "The [IANA timezone](https://timeapi.io/documentation/iana-timezones) \nof the user, e.g. `America/Los_Angeles`.\n", - "type": "string", - "nullable": true - } - }, - "type": "object", - "title": "WebSearchLocation" - }, - "WebSearchOptions": { - "properties": { - "user_location": { - "description": "Approximate location parameters for the search.\n", - "$ref": "#/components/schemas/UserLocation", - "nullable": true - }, - "search_context_size": { - "$ref": "#/components/schemas/WebSearchContextSize", - "nullable": true - } - }, - "type": "object", - "title": "WebSearchOptions" - }, - "ZipArtifactFlavor": { - "properties": { - "requirements": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Requirements" - }, - "framework": { - "oneOf": [ - { - "$ref": "#/components/schemas/PytorchFramework" - }, - { - "$ref": "#/components/schemas/TensorflowFramework" - }, - { - "$ref": "#/components/schemas/CustomFramework" - } - ], - "title": "Framework", - "discriminator": { - "propertyName": "framework_type", - "mapping": { - "custom_base_image": "#/components/schemas/CustomFramework", - "pytorch": "#/components/schemas/PytorchFramework", 
- "tensorflow": "#/components/schemas/TensorflowFramework" - } - } - }, - "app_config": { - "title": "App Config", - "additionalProperties": true, - "type": "object", - "nullable": true - }, - "location": { - "type": "string", - "title": "Location" - }, - "flavor": { - "type": "string", - "title": "Flavor", - "enum": [ - "zip_artifact" - ] - }, - "load_predict_fn_module_path": { - "type": "string", - "title": "Load Predict Fn Module Path" - }, - "load_model_fn_module_path": { - "type": "string", - "title": "Load Model Fn Module Path" - } - }, - "type": "object", - "required": [ - "requirements", - "framework", - "location", - "flavor", - "load_predict_fn_module_path", - "load_model_fn_module_path" - ], - "title": "ZipArtifactFlavor", - "description": "This is the entity-layer class for the Model Bundle flavor of a zip artifact." - }, - "CreateLLMModelEndpointV1Request": { - "oneOf": [ - { - "$ref": "#/components/schemas/CreateVLLMModelEndpointRequest" - }, - { - "$ref": "#/components/schemas/CreateSGLangModelEndpointRequest" - }, - { - "$ref": "#/components/schemas/CreateDeepSpeedModelEndpointRequest" - }, - { - "$ref": "#/components/schemas/CreateTextGenerationInferenceModelEndpointRequest" - }, - { - "$ref": "#/components/schemas/CreateLightLLMModelEndpointRequest" - }, - { - "$ref": "#/components/schemas/CreateTensorRTLLMModelEndpointRequest" - } - ], - "title": "RootModel[Annotated[Union[Annotated[CreateVLLMModelEndpointRequest, Tag], Annotated[CreateSGLangModelEndpointRequest, Tag], Annotated[CreateDeepSpeedModelEndpointRequest, Tag], Annotated[CreateTextGenerationInferenceModelEndpointRequest, Tag], Annotated[CreateLightLLMModelEndpointRequest, Tag], Annotated[CreateTensorRTLLMModelEndpointRequest, Tag]], Discriminator]]" - }, - "UpdateLLMModelEndpointV1Request": { - "oneOf": [ - { - "$ref": "#/components/schemas/UpdateVLLMModelEndpointRequest" - }, - { - "$ref": "#/components/schemas/UpdateSGLangModelEndpointRequest" - }, - { - "$ref": 
"#/components/schemas/UpdateDeepSpeedModelEndpointRequest" - }, - { - "$ref": "#/components/schemas/UpdateTextGenerationInferenceModelEndpointRequest" - } - ], - "title": "RootModel[Annotated[Union[Annotated[UpdateVLLMModelEndpointRequest, Tag], Annotated[UpdateSGLangModelEndpointRequest, Tag], Annotated[UpdateDeepSpeedModelEndpointRequest, Tag], Annotated[UpdateTextGenerationInferenceModelEndpointRequest, Tag]], Discriminator]]" - } - }, - "securitySchemes": { - "HTTPBasic": { - "type": "http", - "scheme": "basic" - }, - "OAuth2PasswordBearer": { - "type": "oauth2", - "flows": { - "password": { - "scopes": {}, - "tokenUrl": "token" - } - } - } - } - } -} \ No newline at end of file diff --git a/openapitools.json b/openapitools.json deleted file mode 100644 index b99f7e67..00000000 --- a/openapitools.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "$schema": "./node_modules/@openapitools/openapi-generator-cli/config.schema.json", - "spaces": 2, - "generator-cli": { - "version": "6.4.0", - "generators": { - "python": { - "generatorName": "python", - "output": "#{cwd}", - "glob": "openapi.json", - "packageName": "launch.api_client", - "additionalProperties": { - "packageVersion": "1.1.2" - }, - "globalProperty": { - "skipFormModel": false - }, - "skipValidateSpec": true - } - } - } -} diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index 7b46fd0c..00000000 --- a/poetry.lock +++ /dev/null @@ -1,2086 +0,0 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. 
- -[[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - -[[package]] -name = "astroid" -version = "2.15.8" -description = "An abstract syntax tree for Python with inference support." -optional = false -python-versions = ">=3.7.2" -groups = ["dev"] -files = [ - {file = "astroid-2.15.8-py3-none-any.whl", hash = "sha256:1aa149fc5c6589e3d0ece885b4491acd80af4f087baafa3fb5203b113e68cd3c"}, - {file = "astroid-2.15.8.tar.gz", hash = "sha256:6c107453dffee9055899705de3c9ead36e74119cee151e5a9aaf7f0b0e020a6a"}, -] - -[package.dependencies] -lazy-object-proxy = ">=1.4.0" -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} -wrapt = [ - {version = ">=1.11,<2", markers = "python_version < \"3.11\""}, - {version = ">=1.14,<2", markers = "python_version >= \"3.11\""}, -] - -[[package]] -name = "astunparse" -version = "1.6.3" -description = "An AST unparser for Python" -optional = false -python-versions = "*" -groups = ["dev"] -markers = "python_version < \"3.9\"" -files = [ - {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"}, - {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"}, -] - -[package.dependencies] -six = ">=1.6.1,<2.0" -wheel = ">=0.23.0,<1.0" - -[[package]] -name = "babel" -version = "2.15.0" -description = "Internationalization utilities" -optional = false -python-versions = ">=3.8" 
-groups = ["dev"] -files = [ - {file = "Babel-2.15.0-py3-none-any.whl", hash = "sha256:08706bdad8d0a3413266ab61bd6c34d0c28d6e1e7badf40a2cebe67644e2e1fb"}, - {file = "babel-2.15.0.tar.gz", hash = "sha256:8daf0e265d05768bc6c7a314cf1321e9a123afc328cc635c18622a2f30a04413"}, -] - -[package.dependencies] -pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} - -[package.extras] -dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] - -[[package]] -name = "black" -version = "23.12.1" -description = "The uncompromising code formatter." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "black-23.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2"}, - {file = "black-23.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba"}, - {file = "black-23.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a920b569dc6b3472513ba6ddea21f440d4b4c699494d2e972a1753cdc25df7b0"}, - {file = "black-23.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:3fa4be75ef2a6b96ea8d92b1587dd8cb3a35c7e3d51f0738ced0781c3aa3a5a3"}, - {file = "black-23.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8d4df77958a622f9b5a4c96edb4b8c0034f8434032ab11077ec6c56ae9f384ba"}, - {file = "black-23.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:602cfb1196dc692424c70b6507593a2b29aac0547c1be9a1d1365f0d964c353b"}, - {file = "black-23.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c4352800f14be5b4864016882cdba10755bd50805c95f728011bcb47a4afd59"}, - {file = "black-23.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:0808494f2b2df923ffc5723ed3c7b096bd76341f6213989759287611e9837d50"}, - {file = "black-23.12.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:25e57fd232a6d6ff3f4478a6fd0580838e47c93c83eaf1ccc92d4faf27112c4e"}, - {file = 
"black-23.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2d9e13db441c509a3763a7a3d9a49ccc1b4e974a47be4e08ade2a228876500ec"}, - {file = "black-23.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1bd9c210f8b109b1762ec9fd36592fdd528485aadb3f5849b2740ef17e674e"}, - {file = "black-23.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:ae76c22bde5cbb6bfd211ec343ded2163bba7883c7bc77f6b756a1049436fbb9"}, - {file = "black-23.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1fa88a0f74e50e4487477bc0bb900c6781dbddfdfa32691e780bf854c3b4a47f"}, - {file = "black-23.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a4d6a9668e45ad99d2f8ec70d5c8c04ef4f32f648ef39048d010b0689832ec6d"}, - {file = "black-23.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b18fb2ae6c4bb63eebe5be6bd869ba2f14fd0259bda7d18a46b764d8fb86298a"}, - {file = "black-23.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:c04b6d9d20e9c13f43eee8ea87d44156b8505ca8a3c878773f68b4e4812a421e"}, - {file = "black-23.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e1b38b3135fd4c025c28c55ddfc236b05af657828a8a6abe5deec419a0b7055"}, - {file = "black-23.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4f0031eaa7b921db76decd73636ef3a12c942ed367d8c3841a0739412b260a54"}, - {file = "black-23.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97e56155c6b737854e60a9ab1c598ff2533d57e7506d97af5481141671abf3ea"}, - {file = "black-23.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:dd15245c8b68fe2b6bd0f32c1556509d11bb33aec9b5d0866dd8e2ed3dba09c2"}, - {file = "black-23.12.1-py3-none-any.whl", hash = "sha256:78baad24af0f033958cad29731e27363183e140962595def56423e626f4bee3e"}, - {file = "black-23.12.1.tar.gz", hash = "sha256:4ce3ef14ebe8d9509188014d96af1c456a910d5b5cbf434a09fef7e024b3d0d5"}, -] - -[package.dependencies] -click = ">=8.0.0" -mypy-extensions = ">=0.4.3" -packaging = ">=22.0" -pathspec = ">=0.9.0" -platformdirs = ">=2" 
-tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} - -[package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4) ; sys_platform != \"win32\" or implementation_name != \"pypy\"", "aiohttp (>=3.7.4,!=3.9.0) ; sys_platform == \"win32\" and implementation_name == \"pypy\""] -jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -uvloop = ["uvloop (>=0.15.2)"] - -[[package]] -name = "certifi" -version = "2024.7.4" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -groups = ["main", "dev"] -files = [ - {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, - {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, -] - -[[package]] -name = "cfgv" -version = "3.4.0" -description = "Validate configuration and produce human readable error messages." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, - {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.3.2" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-optional = false -python-versions = ">=3.7.0" -groups = ["main", "dev"] -files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = 
"charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, -] - -[[package]] -name = "click" -version = "8.1.7" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "cloudpickle" -version = "2.2.1" -description = "Extended pickling support for Python objects" -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "cloudpickle-2.2.1-py3-none-any.whl", hash = "sha256:61f594d1f4c295fa5cd9014ceb3a1fc4a70b0de1164b94fbc2d854ccba056f9f"}, - {file = 
"cloudpickle-2.2.1.tar.gz", hash = "sha256:d89684b8de9e34a2a43b3460fbca07d09d6e25ce858df4d5a44240403b6178f5"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main", "dev"] -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] -markers = {main = "platform_system == \"Windows\""} - -[[package]] -name = "commonmark" -version = "0.9.1" -description = "Python parser for the CommonMark Markdown spec" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, - {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, -] - -[package.extras] -test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] - -[[package]] -name = "coverage" -version = "6.5.0" -description = "Code coverage measurement for Python" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "coverage-6.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ef8674b0ee8cc11e2d574e3e2998aea5df5ab242e012286824ea3c6970580e53"}, - {file = "coverage-6.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:784f53ebc9f3fd0e2a3f6a78b2be1bd1f5575d7863e10c6e12504f240fd06660"}, - {file = "coverage-6.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4a5be1748d538a710f87542f22c2cad22f80545a847ad91ce45e77417293eb4"}, - {file = "coverage-6.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:83516205e254a0cb77d2d7bb3632ee019d93d9f4005de31dca0a8c3667d5bc04"}, - {file = "coverage-6.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af4fffaffc4067232253715065e30c5a7ec6faac36f8fc8d6f64263b15f74db0"}, - {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:97117225cdd992a9c2a5515db1f66b59db634f59d0679ca1fa3fe8da32749cae"}, - {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a1170fa54185845505fbfa672f1c1ab175446c887cce8212c44149581cf2d466"}, - {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:11b990d520ea75e7ee8dcab5bc908072aaada194a794db9f6d7d5cfd19661e5a"}, - {file = "coverage-6.5.0-cp310-cp310-win32.whl", hash = "sha256:5dbec3b9095749390c09ab7c89d314727f18800060d8d24e87f01fb9cfb40b32"}, - {file = "coverage-6.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:59f53f1dc5b656cafb1badd0feb428c1e7bc19b867479ff72f7a9dd9b479f10e"}, - {file = "coverage-6.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4a5375e28c5191ac38cca59b38edd33ef4cc914732c916f2929029b4bfb50795"}, - {file = "coverage-6.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4ed2820d919351f4167e52425e096af41bfabacb1857186c1ea32ff9983ed75"}, - {file = "coverage-6.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33a7da4376d5977fbf0a8ed91c4dffaaa8dbf0ddbf4c8eea500a2486d8bc4d7b"}, - {file = "coverage-6.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8fb6cf131ac4070c9c5a3e21de0f7dc5a0fbe8bc77c9456ced896c12fcdad91"}, - {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a6b7d95969b8845250586f269e81e5dfdd8ff828ddeb8567a4a2eaa7313460c4"}, - {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:1ef221513e6f68b69ee9e159506d583d31aa3567e0ae84eaad9d6ec1107dddaa"}, - {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cca4435eebea7962a52bdb216dec27215d0df64cf27fc1dd538415f5d2b9da6b"}, - {file = "coverage-6.5.0-cp311-cp311-win32.whl", hash = "sha256:98e8a10b7a314f454d9eff4216a9a94d143a7ee65018dd12442e898ee2310578"}, - {file = "coverage-6.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:bc8ef5e043a2af066fa8cbfc6e708d58017024dc4345a1f9757b329a249f041b"}, - {file = "coverage-6.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4433b90fae13f86fafff0b326453dd42fc9a639a0d9e4eec4d366436d1a41b6d"}, - {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4f05d88d9a80ad3cac6244d36dd89a3c00abc16371769f1340101d3cb899fc3"}, - {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:94e2565443291bd778421856bc975d351738963071e9b8839ca1fc08b42d4bef"}, - {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:027018943386e7b942fa832372ebc120155fd970837489896099f5cfa2890f79"}, - {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:255758a1e3b61db372ec2736c8e2a1fdfaf563977eedbdf131de003ca5779b7d"}, - {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:851cf4ff24062c6aec510a454b2584f6e998cada52d4cb58c5e233d07172e50c"}, - {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:12adf310e4aafddc58afdb04d686795f33f4d7a6fa67a7a9d4ce7d6ae24d949f"}, - {file = "coverage-6.5.0-cp37-cp37m-win32.whl", hash = "sha256:b5604380f3415ba69de87a289a2b56687faa4fe04dbee0754bfcae433489316b"}, - {file = "coverage-6.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4a8dbc1f0fbb2ae3de73eb0bdbb914180c7abfbf258e90b311dcd4f585d44bd2"}, - {file = "coverage-6.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:d900bb429fdfd7f511f868cedd03a6bbb142f3f9118c09b99ef8dc9bf9643c3c"}, - {file = "coverage-6.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2198ea6fc548de52adc826f62cb18554caedfb1d26548c1b7c88d8f7faa8f6ba"}, - {file = "coverage-6.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c4459b3de97b75e3bd6b7d4b7f0db13f17f504f3d13e2a7c623786289dd670e"}, - {file = "coverage-6.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20c8ac5386253717e5ccc827caad43ed66fea0efe255727b1053a8154d952398"}, - {file = "coverage-6.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b07130585d54fe8dff3d97b93b0e20290de974dc8177c320aeaf23459219c0b"}, - {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dbdb91cd8c048c2b09eb17713b0c12a54fbd587d79adcebad543bc0cd9a3410b"}, - {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3001a203182842a4630e7b8d1a2c7c07ec1b45d3084a83d5d227a3806f530f"}, - {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e07f4a4a9b41583d6eabec04f8b68076ab3cd44c20bd29332c6572dda36f372e"}, - {file = "coverage-6.5.0-cp38-cp38-win32.whl", hash = "sha256:6d4817234349a80dbf03640cec6109cd90cba068330703fa65ddf56b60223a6d"}, - {file = "coverage-6.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:7ccf362abd726b0410bf8911c31fbf97f09f8f1061f8c1cf03dfc4b6372848f6"}, - {file = "coverage-6.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:633713d70ad6bfc49b34ead4060531658dc6dfc9b3eb7d8a716d5873377ab745"}, - {file = "coverage-6.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:95203854f974e07af96358c0b261f1048d8e1083f2de9b1c565e1be4a3a48cfc"}, - {file = "coverage-6.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9023e237f4c02ff739581ef35969c3739445fb059b060ca51771e69101efffe"}, - {file = 
"coverage-6.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:265de0fa6778d07de30bcf4d9dc471c3dc4314a23a3c6603d356a3c9abc2dfcf"}, - {file = "coverage-6.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f830ed581b45b82451a40faabb89c84e1a998124ee4212d440e9c6cf70083e5"}, - {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7b6be138d61e458e18d8e6ddcddd36dd96215edfe5f1168de0b1b32635839b62"}, - {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42eafe6778551cf006a7c43153af1211c3aaab658d4d66fa5fcc021613d02518"}, - {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:723e8130d4ecc8f56e9a611e73b31219595baa3bb252d539206f7bbbab6ffc1f"}, - {file = "coverage-6.5.0-cp39-cp39-win32.whl", hash = "sha256:d9ecf0829c6a62b9b573c7bb6d4dcd6ba8b6f80be9ba4fc7ed50bf4ac9aecd72"}, - {file = "coverage-6.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc2af30ed0d5ae0b1abdb4ebdce598eafd5b35397d4d75deb341a614d333d987"}, - {file = "coverage-6.5.0-pp36.pp37.pp38-none-any.whl", hash = "sha256:1431986dac3923c5945271f169f59c45b8802a114c8f548d611f2015133df77a"}, - {file = "coverage-6.5.0.tar.gz", hash = "sha256:f642e90754ee3e06b0e7e51bce3379590e76b7f76b708e1a71ff043f87025c84"}, -] - -[package.extras] -toml = ["tomli ; python_full_version <= \"3.11.0a6\""] - -[[package]] -name = "cyclic" -version = "1.0.0" -description = "Handle cyclic relations" -optional = false -python-versions = "*" -groups = ["dev"] -files = [ - {file = "cyclic-1.0.0-py3-none-any.whl", hash = "sha256:32d8181d7698f426bce6f14f4c3921ef95b6a84af9f96192b59beb05bc00c3ed"}, - {file = "cyclic-1.0.0.tar.gz", hash = "sha256:ecddd56cb831ee3e6b79f61ecb0ad71caee606c507136867782911aa01c3e5eb"}, -] - -[[package]] -name = "dataclasses-json" -version = "0.5.9" -description = "Easily serialize dataclasses to and from JSON" -optional = false 
-python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "dataclasses-json-0.5.9.tar.gz", hash = "sha256:e9ac87b73edc0141aafbce02b44e93553c3123ad574958f0fe52a534b6707e8e"}, - {file = "dataclasses_json-0.5.9-py3-none-any.whl", hash = "sha256:1280542631df1c375b7bc92e5b86d39e06c44760d7e3571a537b3b8acabf2f0c"}, -] - -[package.dependencies] -marshmallow = ">=3.3.0,<4.0.0" -marshmallow-enum = ">=1.5.1,<2.0.0" -typing-inspect = ">=0.4.0" - -[package.extras] -dev = ["flake8", "hypothesis", "ipython", "mypy (>=0.710)", "portray", "pytest (>=7.2.0)", "setuptools", "simplejson", "twine", "types-dataclasses ; python_version == \"3.6\"", "wheel"] - -[[package]] -name = "deprecation" -version = "2.1.0" -description = "A library to handle automated deprecations" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, - {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, -] - -[package.dependencies] -packaging = "*" - -[[package]] -name = "dill" -version = "0.3.8" -description = "serialize all of Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, -] - -[package.extras] -graph = ["objgraph (>=1.7.2)"] -profile = ["gprof2dot (>=2022.7.29)"] - -[[package]] -name = "distlib" -version = "0.3.8" -description = "Distribution utilities" -optional = false -python-versions = "*" -groups = ["dev"] -files = [ - {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, - {file = "distlib-0.3.8.tar.gz", hash = 
"sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, -] - -[[package]] -name = "exceptiongroup" -version = "1.2.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "filelock" -version = "3.15.4" -description = "A platform independent file lock." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, - {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, -] - -[package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] -typing = ["typing-extensions (>=4.8) ; python_version < \"3.11\""] - -[[package]] -name = "frozendict" -version = "2.4.4" -description = "A simple immutable dictionary" -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "frozendict-2.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4a59578d47b3949437519b5c39a016a6116b9e787bb19289e333faae81462e59"}, - {file = "frozendict-2.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12a342e439aef28ccec533f0253ea53d75fe9102bd6ea928ff530e76eac38906"}, - {file = 
"frozendict-2.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f79c26dff10ce11dad3b3627c89bb2e87b9dd5958c2b24325f16a23019b8b94"}, - {file = "frozendict-2.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2bd009cf4fc47972838a91e9b83654dc9a095dc4f2bb3a37c3f3124c8a364543"}, - {file = "frozendict-2.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:87ebcde21565a14fe039672c25550060d6f6d88cf1f339beac094c3b10004eb0"}, - {file = "frozendict-2.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:fefeb700bc7eb8b4c2dc48704e4221860d254c8989fb53488540bc44e44a1ac2"}, - {file = "frozendict-2.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:4297d694eb600efa429769125a6f910ec02b85606f22f178bafbee309e7d3ec7"}, - {file = "frozendict-2.4.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:812ab17522ba13637826e65454115a914c2da538356e85f43ecea069813e4b33"}, - {file = "frozendict-2.4.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fee9420475bb6ff357000092aa9990c2f6182b2bab15764330f4ad7de2eae49"}, - {file = "frozendict-2.4.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3148062675536724502c6344d7c485dd4667fdf7980ca9bd05e338ccc0c4471e"}, - {file = "frozendict-2.4.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:78c94991944dd33c5376f720228e5b252ee67faf3bac50ef381adc9e51e90d9d"}, - {file = "frozendict-2.4.4-cp36-cp36m-win_amd64.whl", hash = "sha256:1697793b5f62b416c0fc1d94638ec91ed3aa4ab277f6affa3a95216ecb3af170"}, - {file = "frozendict-2.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:199a4d32194f3afed6258de7e317054155bc9519252b568d9cfffde7e4d834e5"}, - {file = "frozendict-2.4.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85375ec6e979e6373bffb4f54576a68bf7497c350861d20686ccae38aab69c0a"}, - {file = "frozendict-2.4.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:2d8536e068d6bf281f23fa835ac07747fb0f8851879dd189e9709f9567408b4d"}, - {file = "frozendict-2.4.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:259528ba6b56fa051bc996f1c4d8b57e30d6dd3bc2f27441891b04babc4b5e73"}, - {file = "frozendict-2.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:07c3a5dee8bbb84cba770e273cdbf2c87c8e035903af8f781292d72583416801"}, - {file = "frozendict-2.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6874fec816b37b6eb5795b00e0574cba261bf59723e2de607a195d5edaff0786"}, - {file = "frozendict-2.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8f92425686323a950337da4b75b4c17a3327b831df8c881df24038d560640d4"}, - {file = "frozendict-2.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d58d9a8d9e49662c6dafbea5e641f97decdb3d6ccd76e55e79818415362ba25"}, - {file = "frozendict-2.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:93a7b19afb429cbf99d56faf436b45ef2fa8fe9aca89c49eb1610c3bd85f1760"}, - {file = "frozendict-2.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2b70b431e3a72d410a2cdf1497b3aba2f553635e0c0f657ce311d841bf8273b6"}, - {file = "frozendict-2.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:e1b941132d79ce72d562a13341d38fc217bc1ee24d8c35a20d754e79ff99e038"}, - {file = "frozendict-2.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc2228874eacae390e63fd4f2bb513b3144066a977dc192163c9f6c7f6de6474"}, - {file = "frozendict-2.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63aa49f1919af7d45fb8fd5dec4c0859bc09f46880bd6297c79bb2db2969b63d"}, - {file = "frozendict-2.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6bf9260018d653f3cab9bd147bd8592bf98a5c6e338be0491ced3c196c034a3"}, - {file = "frozendict-2.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6eb716e6a6d693c03b1d53280a1947716129f5ef9bcdd061db5c17dea44b80fe"}, - {file = "frozendict-2.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:d13b4310db337f4d2103867c5a05090b22bc4d50ca842093779ef541ea9c9eea"}, - {file = "frozendict-2.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:b3b967d5065872e27b06f785a80c0ed0a45d1f7c9b85223da05358e734d858ca"}, - {file = "frozendict-2.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:4ae8d05c8d0b6134bfb6bfb369d5fa0c4df21eabb5ca7f645af95fdc6689678e"}, - {file = "frozendict-2.4.4-py311-none-any.whl", hash = "sha256:705efca8d74d3facbb6ace80ab3afdd28eb8a237bfb4063ed89996b024bc443d"}, - {file = "frozendict-2.4.4-py312-none-any.whl", hash = "sha256:d9647563e76adb05b7cde2172403123380871360a114f546b4ae1704510801e5"}, - {file = "frozendict-2.4.4.tar.gz", hash = "sha256:3f7c031b26e4ee6a3f786ceb5e3abf1181c4ade92dce1f847da26ea2c96008c7"}, -] - -[[package]] -name = "ghp-import" -version = "2.1.0" -description = "Copy your docs directly to the gh-pages branch." -optional = false -python-versions = "*" -groups = ["dev"] -files = [ - {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, - {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, -] - -[package.dependencies] -python-dateutil = ">=2.8.1" - -[package.extras] -dev = ["flake8", "markdown", "twine", "wheel"] - -[[package]] -name = "griffe" -version = "0.47.0" -description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "griffe-0.47.0-py3-none-any.whl", hash = "sha256:07a2fd6a8c3d21d0bbb0decf701d62042ccc8a576645c7f8799fe1f10de2b2de"}, - {file = "griffe-0.47.0.tar.gz", hash = "sha256:95119a440a3c932b13293538bdbc405bee4c36428547553dc6b327e7e7d35e5a"}, -] - -[package.dependencies] -astunparse = {version = ">=1.6", markers = "python_version < \"3.9\""} -colorama = ">=0.4" - -[[package]] -name = "identify" -version = "2.6.0" -description = "File identification library for Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, - {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, -] - -[package.extras] -license = ["ukkonen"] - -[[package]] -name = "idna" -version = "3.7" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -groups = ["main", "dev"] -files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, -] - -[[package]] -name = "importlib-metadata" -version = "8.0.0" -description = "Read metadata from Python packages" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -markers = "python_version < \"3.10\"" -files = [ - {file = "importlib_metadata-8.0.0-py3-none-any.whl", hash = "sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f"}, - {file = "importlib_metadata-8.0.0.tar.gz", hash = "sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812"}, -] - -[package.dependencies] -zipp = ">=0.5" - -[package.extras] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker 
(>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - -[[package]] -name = "jinja2" -version = "3.1.4" -description = "A very fast and expressive template engine." -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, - {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, -] - -[package.dependencies] -MarkupSafe = ">=2.0" - -[package.extras] -i18n = ["Babel (>=2.7)"] - -[[package]] -name = "lazy-object-proxy" -version = "1.10.0" -description = "A fast and thorough lazy object proxy." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "lazy-object-proxy-1.10.0.tar.gz", hash = "sha256:78247b6d45f43a52ef35c25b5581459e85117225408a4128a3daf8bf9648ac69"}, - {file = "lazy_object_proxy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:855e068b0358ab916454464a884779c7ffa312b8925c6f7401e952dcf3b89977"}, - {file = "lazy_object_proxy-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab7004cf2e59f7c2e4345604a3e6ea0d92ac44e1c2375527d56492014e690c3"}, - {file = "lazy_object_proxy-1.10.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc0d2fc424e54c70c4bc06787e4072c4f3b1aa2f897dfdc34ce1013cf3ceef05"}, - {file = "lazy_object_proxy-1.10.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e2adb09778797da09d2b5ebdbceebf7dd32e2c96f79da9052b2e87b6ea495895"}, - {file = "lazy_object_proxy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b1f711e2c6dcd4edd372cf5dec5c5a30d23bba06ee012093267b3376c079ec83"}, - {file = "lazy_object_proxy-1.10.0-cp310-cp310-win32.whl", hash = "sha256:76a095cfe6045c7d0ca77db9934e8f7b71b14645f0094ffcd842349ada5c5fb9"}, - {file = "lazy_object_proxy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:b4f87d4ed9064b2628da63830986c3d2dca7501e6018347798313fcf028e2fd4"}, - {file = "lazy_object_proxy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fec03caabbc6b59ea4a638bee5fce7117be8e99a4103d9d5ad77f15d6f81020c"}, - {file = "lazy_object_proxy-1.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02c83f957782cbbe8136bee26416686a6ae998c7b6191711a04da776dc9e47d4"}, - {file = "lazy_object_proxy-1.10.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:009e6bb1f1935a62889ddc8541514b6a9e1fcf302667dcb049a0be5c8f613e56"}, - {file = "lazy_object_proxy-1.10.0-cp311-cp311-musllinux_1_1_aarch64.whl", 
hash = "sha256:75fc59fc450050b1b3c203c35020bc41bd2695ed692a392924c6ce180c6f1dc9"}, - {file = "lazy_object_proxy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:782e2c9b2aab1708ffb07d4bf377d12901d7a1d99e5e410d648d892f8967ab1f"}, - {file = "lazy_object_proxy-1.10.0-cp311-cp311-win32.whl", hash = "sha256:edb45bb8278574710e68a6b021599a10ce730d156e5b254941754a9cc0b17d03"}, - {file = "lazy_object_proxy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:e271058822765ad5e3bca7f05f2ace0de58a3f4e62045a8c90a0dfd2f8ad8cc6"}, - {file = "lazy_object_proxy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e98c8af98d5707dcdecc9ab0863c0ea6e88545d42ca7c3feffb6b4d1e370c7ba"}, - {file = "lazy_object_proxy-1.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:952c81d415b9b80ea261d2372d2a4a2332a3890c2b83e0535f263ddfe43f0d43"}, - {file = "lazy_object_proxy-1.10.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80b39d3a151309efc8cc48675918891b865bdf742a8616a337cb0090791a0de9"}, - {file = "lazy_object_proxy-1.10.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e221060b701e2aa2ea991542900dd13907a5c90fa80e199dbf5a03359019e7a3"}, - {file = "lazy_object_proxy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92f09ff65ecff3108e56526f9e2481b8116c0b9e1425325e13245abfd79bdb1b"}, - {file = "lazy_object_proxy-1.10.0-cp312-cp312-win32.whl", hash = "sha256:3ad54b9ddbe20ae9f7c1b29e52f123120772b06dbb18ec6be9101369d63a4074"}, - {file = "lazy_object_proxy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:127a789c75151db6af398b8972178afe6bda7d6f68730c057fbbc2e96b08d282"}, - {file = "lazy_object_proxy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4ed0518a14dd26092614412936920ad081a424bdcb54cc13349a8e2c6d106a"}, - {file = "lazy_object_proxy-1.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5ad9e6ed739285919aa9661a5bbed0aaf410aa60231373c5579c6b4801bd883c"}, - {file = "lazy_object_proxy-1.10.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fc0a92c02fa1ca1e84fc60fa258458e5bf89d90a1ddaeb8ed9cc3147f417255"}, - {file = "lazy_object_proxy-1.10.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0aefc7591920bbd360d57ea03c995cebc204b424524a5bd78406f6e1b8b2a5d8"}, - {file = "lazy_object_proxy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5faf03a7d8942bb4476e3b62fd0f4cf94eaf4618e304a19865abf89a35c0bbee"}, - {file = "lazy_object_proxy-1.10.0-cp38-cp38-win32.whl", hash = "sha256:e333e2324307a7b5d86adfa835bb500ee70bfcd1447384a822e96495796b0ca4"}, - {file = "lazy_object_proxy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:cb73507defd385b7705c599a94474b1d5222a508e502553ef94114a143ec6696"}, - {file = "lazy_object_proxy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:366c32fe5355ef5fc8a232c5436f4cc66e9d3e8967c01fb2e6302fd6627e3d94"}, - {file = "lazy_object_proxy-1.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2297f08f08a2bb0d32a4265e98a006643cd7233fb7983032bd61ac7a02956b3b"}, - {file = "lazy_object_proxy-1.10.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18dd842b49456aaa9a7cf535b04ca4571a302ff72ed8740d06b5adcd41fe0757"}, - {file = "lazy_object_proxy-1.10.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:217138197c170a2a74ca0e05bddcd5f1796c735c37d0eee33e43259b192aa424"}, - {file = "lazy_object_proxy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a3a87cf1e133e5b1994144c12ca4aa3d9698517fe1e2ca82977781b16955658"}, - {file = "lazy_object_proxy-1.10.0-cp39-cp39-win32.whl", hash = "sha256:30b339b2a743c5288405aa79a69e706a06e02958eab31859f7f3c04980853b70"}, - {file = "lazy_object_proxy-1.10.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:a899b10e17743683b293a729d3a11f2f399e8a90c73b089e29f5d0fe3509f0dd"}, - {file = "lazy_object_proxy-1.10.0-pp310.pp311.pp312.pp38.pp39-none-any.whl", hash = "sha256:80fa48bd89c8f2f456fc0765c11c23bf5af827febacd2f523ca5bc1893fcc09d"}, -] - -[[package]] -name = "markdown" -version = "3.6" -description = "Python implementation of John Gruber's Markdown." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "Markdown-3.6-py3-none-any.whl", hash = "sha256:48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f"}, - {file = "Markdown-3.6.tar.gz", hash = "sha256:ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224"}, -] - -[package.dependencies] -importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} - -[package.extras] -docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] -testing = ["coverage", "pyyaml"] - -[[package]] -name = "markupsafe" -version = "2.1.5" -description = "Safely add untrusted strings to HTML/XML markup." 
-optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, - {file = 
"MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, - {file = 
"MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, - {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, -] - -[[package]] -name = "marshmallow" -version = "3.21.3" -description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, - {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, -] - -[package.dependencies] -packaging = ">=17.0" - -[package.extras] -dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] -tests = ["pytest", "pytz", "simplejson"] - -[[package]] -name = "marshmallow-enum" -version = "1.5.1" -description = "Enum field for Marshmallow" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "marshmallow-enum-1.5.1.tar.gz", hash = "sha256:38e697e11f45a8e64b4a1e664000897c659b60aa57bfa18d44e226a9920b6e58"}, - {file = "marshmallow_enum-1.5.1-py2.py3-none-any.whl", hash = "sha256:57161ab3dbfde4f57adeb12090f39592e992b9c86d206d02f6bd03ebec60f072"}, -] - -[package.dependencies] -marshmallow = ">=2.0.0" - -[[package]] -name = "mccabe" -version = "0.7.0" -description = "McCabe checker, plugin for flake8" -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, - {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, -] - -[[package]] -name = "mdx-include" -version = "1.4.2" -description = "Python Markdown extension to include local or remote files" -optional = false -python-versions = "*" -groups = ["dev"] -files = [ - {file = "mdx_include-1.4.2-py3-none-any.whl", hash = "sha256:cfbeadd59985f27a9b70cb7ab0a3d209892fe1bb1aa342df055e0b135b3c9f34"}, - {file = "mdx_include-1.4.2.tar.gz", hash = 
"sha256:992f9fbc492b5cf43f7d8cb4b90b52a4e4c5fdd7fd04570290a83eea5c84f297"}, -] - -[package.dependencies] -cyclic = "*" -Markdown = ">=2.6" -rcslice = ">=1.1.0" - -[[package]] -name = "mergedeep" -version = "1.3.4" -description = "A deep merge function for 🐍." -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, - {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, -] - -[[package]] -name = "mkdocs" -version = "1.6.0" -description = "Project documentation with Markdown." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mkdocs-1.6.0-py3-none-any.whl", hash = "sha256:1eb5cb7676b7d89323e62b56235010216319217d4af5ddc543a91beb8d125ea7"}, - {file = "mkdocs-1.6.0.tar.gz", hash = "sha256:a73f735824ef83a4f3bcb7a231dcab23f5a838f88b7efc54a0eef5fbdbc3c512"}, -] - -[package.dependencies] -click = ">=7.0" -colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} -ghp-import = ">=1.0" -importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} -jinja2 = ">=2.11.1" -markdown = ">=3.3.6" -markupsafe = ">=2.0.1" -mergedeep = ">=1.3.4" -mkdocs-get-deps = ">=0.2.0" -packaging = ">=20.5" -pathspec = ">=0.11.1" -pyyaml = ">=5.1" -pyyaml-env-tag = ">=0.1" -watchdog = ">=2.0" - -[package.extras] -i18n = ["babel (>=2.9.0)"] -min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4) ; platform_system == \"Windows\"", "ghp-import (==1.0)", "importlib-metadata (==4.4) ; python_version < \"3.10\"", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] - -[[package]] -name = "mkdocs-autorefs" -version = "1.0.1" -description = 
"Automatically link across pages in MkDocs." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mkdocs_autorefs-1.0.1-py3-none-any.whl", hash = "sha256:aacdfae1ab197780fb7a2dac92ad8a3d8f7ca8049a9cbe56a4218cd52e8da570"}, - {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"}, -] - -[package.dependencies] -Markdown = ">=3.3" -markupsafe = ">=2.0.1" -mkdocs = ">=1.1" - -[[package]] -name = "mkdocs-get-deps" -version = "0.2.0" -description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, - {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, -] - -[package.dependencies] -importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} -mergedeep = ">=1.3.4" -platformdirs = ">=2.2.0" -pyyaml = ">=5.1" - -[[package]] -name = "mkdocs-material" -version = "9.5.29" -description = "Documentation that simply works" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mkdocs_material-9.5.29-py3-none-any.whl", hash = "sha256:afc1f508e2662ded95f0a35a329e8a5acd73ee88ca07ba73836eb6fcdae5d8b4"}, - {file = "mkdocs_material-9.5.29.tar.gz", hash = "sha256:3e977598ec15a4ddad5c4dfc9e08edab6023edb51e88f0729bd27be77e3d322a"}, -] - -[package.dependencies] -babel = ">=2.10,<3.0" -colorama = ">=0.4,<1.0" -jinja2 = ">=3.0,<4.0" -markdown = ">=3.2,<4.0" -mkdocs = ">=1.6,<2.0" -mkdocs-material-extensions = ">=1.3,<2.0" -paginate = ">=0.5,<1.0" -pygments = ">=2.16,<3.0" -pymdown-extensions = ">=10.2,<11.0" -regex = ">=2022.4" -requests = ">=2.26,<3.0" - -[package.extras] -git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", 
"mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] -imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] -recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] - -[[package]] -name = "mkdocs-material-extensions" -version = "1.3.1" -description = "Extension pack for Python Markdown and MkDocs Material." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, - {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, -] - -[[package]] -name = "mkdocs-simple-hooks" -version = "0.1.5" -description = "Define your own hooks for mkdocs, without having to create a new package." -optional = false -python-versions = "*" -groups = ["dev"] -files = [ - {file = "mkdocs-simple-hooks-0.1.5.tar.gz", hash = "sha256:dddbdf151a18723c9302a133e5cf79538be8eb9d274e8e07d2ac3ac34890837c"}, - {file = "mkdocs_simple_hooks-0.1.5-py3-none-any.whl", hash = "sha256:efeabdbb98b0850a909adee285f3404535117159d5cb3a34f541d6eaa644d50a"}, -] - -[package.dependencies] -mkdocs = ">=1.2" - -[package.extras] -test = ["pytest (>=4.0)", "pytest-cov"] - -[[package]] -name = "mkdocstrings" -version = "0.20.0" -description = "Automatic documentation from sources, for MkDocs." 
-optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "mkdocstrings-0.20.0-py3-none-any.whl", hash = "sha256:f17fc2c4f760ec302b069075ef9e31045aa6372ca91d2f35ded3adba8e25a472"}, - {file = "mkdocstrings-0.20.0.tar.gz", hash = "sha256:c757f4f646d4f939491d6bc9256bfe33e36c5f8026392f49eaa351d241c838e5"}, -] - -[package.dependencies] -Jinja2 = ">=2.11.1" -Markdown = ">=3.3" -MarkupSafe = ">=1.1" -mkdocs = ">=1.2" -mkdocs-autorefs = ">=0.3.1" -mkdocstrings-python = {version = ">=0.5.2", optional = true, markers = "extra == \"python\""} -pymdown-extensions = ">=6.3" - -[package.extras] -crystal = ["mkdocstrings-crystal (>=0.3.4)"] -python = ["mkdocstrings-python (>=0.5.2)"] -python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] - -[[package]] -name = "mkdocstrings-python" -version = "1.8.0" -description = "A Python handler for mkdocstrings." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mkdocstrings_python-1.8.0-py3-none-any.whl", hash = "sha256:4209970cc90bec194568682a535848a8d8489516c6ed4adbe58bbc67b699ca9d"}, - {file = "mkdocstrings_python-1.8.0.tar.gz", hash = "sha256:1488bddf50ee42c07d9a488dddc197f8e8999c2899687043ec5dd1643d057192"}, -] - -[package.dependencies] -griffe = ">=0.37" -mkdocstrings = ">=0.20" - -[[package]] -name = "mypy" -version = "1.10.1" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, - {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, - {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, - {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, - {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, - {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, - {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, - {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, - {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, - {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, - {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, - {file = 
"mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, - {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, - {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, - {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, - {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, - {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, - {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, - {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, -] - -[package.dependencies] -mypy-extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -install-types = ["pip"] -mypyc = ["setuptools (>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." 
-optional = false -python-versions = ">=3.5" -groups = ["main", "dev"] -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "nodeenv" -version = "1.9.1" -description = "Node.js virtual environment builder" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["dev"] -files = [ - {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, - {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, -] - -[[package]] -name = "packaging" -version = "24.1" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, -] - -[[package]] -name = "paginate" -version = "0.5.6" -description = "Divides large result sets into pages for easier browsing" -optional = false -python-versions = "*" -groups = ["dev"] -files = [ - {file = "paginate-0.5.6.tar.gz", hash = "sha256:5e6007b6a9398177a7e1648d04fdd9f8c9766a1a945bceac82f1929e8c78af2d"}, -] - -[[package]] -name = "pathspec" -version = "0.12.1" -description = "Utility library for gitignore style pattern matching of file paths." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, - {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, -] - -[[package]] -name = "platformdirs" -version = "4.2.2" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, -] - -[package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] - -[[package]] -name = "pluggy" -version = "1.5.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pre-commit" -version = "2.21.0" -description = "A framework for managing and maintaining multi-language pre-commit hooks." 
-optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "pre_commit-2.21.0-py2.py3-none-any.whl", hash = "sha256:e2f91727039fc39a92f58a588a25b87f936de6567eed4f0e673e0507edc75bad"}, - {file = "pre_commit-2.21.0.tar.gz", hash = "sha256:31ef31af7e474a8d8995027fefdfcf509b5c913ff31f2015b4ec4beb26a6f658"}, -] - -[package.dependencies] -cfgv = ">=2.0.0" -identify = ">=1.0.0" -nodeenv = ">=0.11.1" -pyyaml = ">=5.1" -virtualenv = ">=20.10.0" - -[[package]] -name = "prompt-toolkit" -version = "3.0.47" -description = "Library for building powerful interactive command lines in Python" -optional = false -python-versions = ">=3.7.0" -groups = ["main"] -files = [ - {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, - {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, -] - -[package.dependencies] -wcwidth = "*" - -[[package]] -name = "pydantic" -version = "2.8.2" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, -] - -[package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" -typing-extensions = [ - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, -] - -[package.extras] -email = ["email-validator (>=2.0.0)"] - -[[package]] -name = "pydantic-core" -version = "2.20.1" -description = "Core functionality for Pydantic validation and serialization" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = 
"pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = 
"pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = 
"pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = 
"pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = 
"pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = 
"sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = 
"pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "pygments" -version = "2.18.0" -description = "Pygments is a syntax highlighting package written in Python." -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, - {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, -] - -[package.extras] -windows-terminal = ["colorama (>=0.4.6)"] - -[[package]] -name = "pylint" -version = "2.17.7" -description = "python code static checker" -optional = false -python-versions = ">=3.7.2" -groups = ["dev"] -files = [ - {file = "pylint-2.17.7-py3-none-any.whl", hash = "sha256:27a8d4c7ddc8c2f8c18aa0050148f89ffc09838142193fdbe98f172781a3ff87"}, - {file = "pylint-2.17.7.tar.gz", hash = "sha256:f4fcac7ae74cfe36bc8451e931d8438e4a476c20314b1101c458ad0f05191fad"}, -] - -[package.dependencies] -astroid = ">=2.15.8,<=2.17.0-dev0" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -dill = [ - {version = ">=0.2", markers = "python_version < \"3.11\""}, - {version = 
">=0.3.6", markers = "python_version >= \"3.11\""}, -] -isort = ">=4.2.5,<6" -mccabe = ">=0.6,<0.8" -platformdirs = ">=2.2.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -tomlkit = ">=0.10.1" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -spelling = ["pyenchant (>=3.2,<4.0)"] -testutils = ["gitpython (>3)"] - -[[package]] -name = "pymdown-extensions" -version = "10.8.1" -description = "Extension pack for Python Markdown." -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pymdown_extensions-10.8.1-py3-none-any.whl", hash = "sha256:f938326115884f48c6059c67377c46cf631c733ef3629b6eed1349989d1b30cb"}, - {file = "pymdown_extensions-10.8.1.tar.gz", hash = "sha256:3ab1db5c9e21728dabf75192d71471f8e50f216627e9a1fa9535ecb0231b9940"}, -] - -[package.dependencies] -markdown = ">=3.6" -pyyaml = "*" - -[package.extras] -extra = ["pygments (>=2.12)"] - -[[package]] -name = "pytest" -version = "7.4.4" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, - {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} - -[package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-mock" -version = "3.14.0" -description = "Thin-wrapper around the mock package for easier use with 
pytest" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, - {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, -] - -[package.dependencies] -pytest = ">=6.2.5" - -[package.extras] -dev = ["pre-commit", "pytest-asyncio", "tox"] - -[[package]] -name = "python-dateutil" -version = "2.9.0.post0" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main", "dev"] -files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "pytz" -version = "2024.1" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -groups = ["dev"] -markers = "python_version < \"3.9\"" -files = [ - {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, - {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, -] - -[[package]] -name = "pyyaml" -version = "6.0.1" -description = "YAML parser and emitter for Python" -optional = false -python-versions = ">=3.6" -groups = ["main", "dev"] -files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = 
"PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = 
"PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = 
"sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, -] - -[[package]] -name = "pyyaml-env-tag" -version = "0.1" -description = "A custom YAML tag for referencing environment variables in YAML files. 
" -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, - {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, -] - -[package.dependencies] -pyyaml = "*" - -[[package]] -name = "questionary" -version = "1.10.0" -description = "Python library to build pretty command line user prompts ⭐️" -optional = false -python-versions = ">=3.6,<4.0" -groups = ["main"] -files = [ - {file = "questionary-1.10.0-py3-none-any.whl", hash = "sha256:fecfcc8cca110fda9d561cb83f1e97ecbb93c613ff857f655818839dac74ce90"}, - {file = "questionary-1.10.0.tar.gz", hash = "sha256:600d3aefecce26d48d97eee936fdb66e4bc27f934c3ab6dd1e292c4f43946d90"}, -] - -[package.dependencies] -prompt_toolkit = ">=2.0,<4.0" - -[package.extras] -docs = ["Sphinx (>=3.3,<4.0)", "sphinx-autobuild (>=2020.9.1,<2021.0.0)", "sphinx-autodoc-typehints (>=1.11.1,<2.0.0)", "sphinx-copybutton (>=0.3.1,<0.4.0)", "sphinx-rtd-theme (>=0.5.0,<0.6.0)"] - -[[package]] -name = "rcslice" -version = "1.1.0" -description = "Slice a list of sliceables (1 indexed, start and end index both are inclusive)" -optional = false -python-versions = "*" -groups = ["dev"] -files = [ - {file = "rcslice-1.1.0-py3-none-any.whl", hash = "sha256:1b12fc0c0ca452e8a9fd2b56ac008162f19e250906a4290a7e7a98be3200c2a6"}, - {file = "rcslice-1.1.0.tar.gz", hash = "sha256:a2ce70a60690eb63e52b722e046b334c3aaec5e900b28578f529878782ee5c6e"}, -] - -[[package]] -name = "regex" -version = "2024.5.15" -description = "Alternative regular expression module, to replace re." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f"}, - {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6"}, - {file = "regex-2024.5.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53"}, - {file = "regex-2024.5.15-cp310-cp310-win32.whl", hash = "sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3"}, - {file = "regex-2024.5.15-cp310-cp310-win_amd64.whl", hash = "sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40"}, - {file = 
"regex-2024.5.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68"}, - {file = "regex-2024.5.15-cp311-cp311-win32.whl", hash = "sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa"}, - {file = "regex-2024.5.15-cp311-cp311-win_amd64.whl", hash = "sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a"}, - {file = 
"regex-2024.5.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80"}, - {file = "regex-2024.5.15-cp312-cp312-win32.whl", hash = "sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe"}, - {file = "regex-2024.5.15-cp312-cp312-win_amd64.whl", hash = "sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2"}, - {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87e2a9c29e672fc65523fb47a90d429b70ef72b901b4e4b1bd42387caf0d6835"}, - {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3bea0ba8b73b71b37ac833a7f3fd53825924165da6a924aec78c13032f20850"}, - {file = "regex-2024.5.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bfc4f82cabe54f1e7f206fd3d30fda143f84a63fe7d64a81558d6e5f2e5aaba9"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5bb9425fe881d578aeca0b2b4b3d314ec88738706f66f219c194d67179337cb"}, - {file = 
"regex-2024.5.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64c65783e96e563103d641760664125e91bd85d8e49566ee560ded4da0d3e704"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf2430df4148b08fb4324b848672514b1385ae3807651f3567871f130a728cc3"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5397de3219a8b08ae9540c48f602996aa6b0b65d5a61683e233af8605c42b0f2"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:455705d34b4154a80ead722f4f185b04c4237e8e8e33f265cd0798d0e44825fa"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2b6f1b3bb6f640c1a92be3bbfbcb18657b125b99ecf141fb3310b5282c7d4ed"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3ad070b823ca5890cab606c940522d05d3d22395d432f4aaaf9d5b1653e47ced"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5b5467acbfc153847d5adb21e21e29847bcb5870e65c94c9206d20eb4e99a384"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e6662686aeb633ad65be2a42b4cb00178b3fbf7b91878f9446075c404ada552f"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2b4c884767504c0e2401babe8b5b7aea9148680d2e157fa28f01529d1f7fcf67"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3cd7874d57f13bf70078f1ff02b8b0aa48d5b9ed25fc48547516c6aba36f5741"}, - {file = "regex-2024.5.15-cp38-cp38-win32.whl", hash = "sha256:e4682f5ba31f475d58884045c1a97a860a007d44938c4c0895f41d64481edbc9"}, - {file = "regex-2024.5.15-cp38-cp38-win_amd64.whl", hash = "sha256:d99ceffa25ac45d150e30bd9ed14ec6039f2aad0ffa6bb87a5936f5782fc1569"}, - {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:13cdaf31bed30a1e1c2453ef6015aa0983e1366fad2667657dbcac7b02f67133"}, - {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cac27dcaa821ca271855a32188aa61d12decb6fe45ffe3e722401fe61e323cd1"}, - {file = "regex-2024.5.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7dbe2467273b875ea2de38ded4eba86cbcbc9a1a6d0aa11dcf7bd2e67859c435"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f18a9a3513a99c4bef0e3efd4c4a5b11228b48aa80743be822b71e132ae4f5"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d347a741ea871c2e278fde6c48f85136c96b8659b632fb57a7d1ce1872547600"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1878b8301ed011704aea4c806a3cadbd76f84dece1ec09cc9e4dc934cfa5d4da"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4babf07ad476aaf7830d77000874d7611704a7fcf68c9c2ad151f5d94ae4bfc4"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35cb514e137cb3488bce23352af3e12fb0dbedd1ee6e60da053c69fb1b29cc6c"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cdd09d47c0b2efee9378679f8510ee6955d329424c659ab3c5e3a6edea696294"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:72d7a99cd6b8f958e85fc6ca5b37c4303294954eac1376535b03c2a43eb72629"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a094801d379ab20c2135529948cb84d417a2169b9bdceda2a36f5f10977ebc16"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c0c18345010870e58238790a6779a1219b4d97bd2e77e1140e8ee5d14df071aa"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = 
"sha256:16093f563098448ff6b1fa68170e4acbef94e6b6a4e25e10eae8598bb1694b5d"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e38a7d4e8f633a33b4c7350fbd8bad3b70bf81439ac67ac38916c4a86b465456"}, - {file = "regex-2024.5.15-cp39-cp39-win32.whl", hash = "sha256:71a455a3c584a88f654b64feccc1e25876066c4f5ef26cd6dd711308aa538694"}, - {file = "regex-2024.5.15-cp39-cp39-win_amd64.whl", hash = "sha256:cab12877a9bdafde5500206d1020a584355a97884dfd388af3699e9137bf7388"}, - {file = "regex-2024.5.15.tar.gz", hash = "sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c"}, -] - -[[package]] -name = "requests" -version = "2.32.3" -description = "Python HTTP for Humans." -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "requests-mock" -version = "1.12.1" -description = "Mock out responses from the requests package" -optional = false -python-versions = ">=3.5" -groups = ["dev"] -files = [ - {file = "requests-mock-1.12.1.tar.gz", hash = "sha256:e9e12e333b525156e82a3c852f22016b9158220d2f47454de9cae8a77d371401"}, - {file = "requests_mock-1.12.1-py2.py3-none-any.whl", hash = "sha256:b1e37054004cdd5e56c84454cc7df12b25f90f382159087f4b6915aaeef39563"}, -] - -[package.dependencies] -requests = ">=2.22,<3" - -[package.extras] -fixture = ["fixtures"] - -[[package]] -name = "rich" -version = "12.6.0" -description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -optional 
= false -python-versions = ">=3.6.3,<4.0.0" -groups = ["main"] -files = [ - {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"}, - {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"}, -] - -[package.dependencies] -commonmark = ">=0.9.0,<0.10.0" -pygments = ">=2.6.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} - -[package.extras] -jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] - -[[package]] -name = "ruff" -version = "0.0.252" -description = "An extremely fast Python linter, written in Rust." -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "ruff-0.0.252-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:349367a227c4db7abbc3a9993efea8a608b5bea4bb4a1e5fc6f0d56819524f92"}, - {file = "ruff-0.0.252-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:ce77f9106d96b4faf7865860fb5155b9deaf6f699d9c279118c5ad947739ecaf"}, - {file = "ruff-0.0.252-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edadb0b050293b4e60dab979ba6a4e734d9c899cbe316a0ee5b65e3cdd39c750"}, - {file = "ruff-0.0.252-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4efdae98937d1e4d23ab0b7fc7e8e6b6836cc7d2d42238ceeacbc793ef780542"}, - {file = "ruff-0.0.252-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8546d879f7d3f669379a03e7b103d90e11901976ab508aeda59c03dfd8a359e"}, - {file = "ruff-0.0.252-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:83fdc7169b6c1fb5fe8d1cdf345697f558c1b433ef97df9ca11defa2a8f3ee9e"}, - {file = "ruff-0.0.252-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84ed9be1a17e2a556a571a5b959398633dd10910abd8dcf8b098061e746e892d"}, - {file = "ruff-0.0.252-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:6f5e77bd9ba4438cf2ee32154e2673afe22f538ef29f5d65ca47e3dc46c42cf8"}, - {file = "ruff-0.0.252-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5179b94b45c0f8512eaff3ab304c14714a46df2e9ca72a9d96084adc376b71"}, - {file = "ruff-0.0.252-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:92efd8a71157595df5bc46aaaa0613d8a2fbc5cddc53ae7b749c16025c324732"}, - {file = "ruff-0.0.252-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fd350fc10832cfd28e681d829a8aa83ea3e653326e0ea9d98637dfb8d46177d2"}, - {file = "ruff-0.0.252-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f119240c9631216e846166e06023b1d878e25fbac93bf20da50069e91cfbfaee"}, - {file = "ruff-0.0.252-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5c5a49f89f5ede93d16eddfeeadd7e5739ec703e8f63ac95eac30236b9e49da3"}, - {file = "ruff-0.0.252-py3-none-win32.whl", hash = "sha256:89a897dc743f2fe063483ea666097e72e848f4bbe40493fe0533e61799959f6e"}, - {file = "ruff-0.0.252-py3-none-win_amd64.whl", hash = "sha256:cdc89ad6ff88519b1fb1816ac82a9ad910762c90ff5fd64dda7691b72d36aff7"}, - {file = "ruff-0.0.252-py3-none-win_arm64.whl", hash = "sha256:4b594a17cf53077165429486650658a0e1b2ac6ab88954f5afd50d2b1b5657a9"}, - {file = "ruff-0.0.252.tar.gz", hash = "sha256:6992611ab7bdbe7204e4831c95ddd3febfeece2e6f5e44bbed044454c7db0f63"}, -] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main", "dev"] -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "sseclient-py" -version = "1.8.0" -description = "SSE client for Python" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "sseclient-py-1.8.0.tar.gz", hash = 
"sha256:c547c5c1a7633230a38dc599a21a2dc638f9b5c297286b48b46b935c71fac3e8"}, - {file = "sseclient_py-1.8.0-py2.py3-none-any.whl", hash = "sha256:4ecca6dc0b9f963f8384e9d7fd529bf93dd7d708144c4fb5da0e0a1a926fee83"}, -] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "tomlkit" -version = "0.13.0" -description = "Style preserving TOML library" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, - {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, -] - -[[package]] -name = "types-frozendict" -version = "2.0.9" -description = "Typing stubs for frozendict" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "types-frozendict-2.0.9.tar.gz", hash = "sha256:446b5f33125566c9885c31363e065d6416f8da6c81252af1e459a4e67a15487b"}, - {file = "types_frozendict-2.0.9-py3-none-any.whl", hash = "sha256:d4292ef14f87ddec575ab1d968102dcba2c8f1e490acc67945eee1fc7de24d08"}, -] - -[[package]] -name = "types-pyyaml" -version = "6.0.12.20240311" -description = "Typing stubs for PyYAML" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"}, - {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, -] - -[[package]] 
-name = "types-requests" -version = "2.31.0.6" -description = "Typing stubs for requests" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0"}, - {file = "types_requests-2.31.0.6-py3-none-any.whl", hash = "sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9"}, -] - -[package.dependencies] -types-urllib3 = "*" - -[[package]] -name = "types-setuptools" -version = "57.4.18" -description = "Typing stubs for setuptools" -optional = false -python-versions = "*" -groups = ["dev"] -files = [ - {file = "types-setuptools-57.4.18.tar.gz", hash = "sha256:8ee03d823fe7fda0bd35faeae33d35cb5c25b497263e6a58b34c4cfd05f40bcf"}, - {file = "types_setuptools-57.4.18-py3-none-any.whl", hash = "sha256:9660b8774b12cd61b448e2fd87a667c02e7ec13ce9f15171f1d49a4654c4df6a"}, -] - -[[package]] -name = "types-urllib3" -version = "1.26.25.14" -description = "Typing stubs for urllib3" -optional = false -python-versions = "*" -groups = ["dev"] -files = [ - {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"}, - {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"}, -] - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." 
-optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, - {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, -] - -[package.dependencies] -mypy-extensions = ">=0.3.0" -typing-extensions = ">=3.7.4" - -[[package]] -name = "urllib3" -version = "1.26.19" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" -groups = ["main", "dev"] -files = [ - {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"}, - {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"}, -] - -[package.extras] -brotli = ["brotli (==1.0.9) ; os_name != \"nt\" and python_version < \"3\" and platform_python_implementation == \"CPython\"", "brotli (>=1.0.9) ; python_version >= \"3\" and platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; (os_name != \"nt\" or python_version >= \"3\") and platform_python_implementation != \"CPython\"", "brotlipy (>=0.6.0) ; os_name == \"nt\" and python_version < \"3\""] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] - -[[package]] -name = "virtualenv" -version = "20.26.3" -description = "Virtual Python Environment builder" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, - {file = "virtualenv-20.26.3.tar.gz", hash = 
"sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, -] - -[package.dependencies] -distlib = ">=0.3.7,<1" -filelock = ">=3.12.2,<4" -platformdirs = ">=3.9.1,<5" - -[package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] - -[[package]] -name = "watchdog" -version = "4.0.1" -description = "Filesystem events monitoring" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, - {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, - {file = 
"watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, - {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, - {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, - {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, - {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, - {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, - {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, -] - -[package.extras] -watchmedo = ["PyYAML (>=3.10)"] - -[[package]] -name = "wcwidth" -version = "0.2.13" -description = "Measures the displayed width of unicode strings in a terminal" -optional = false -python-versions = "*" -groups = 
["main"] -files = [ - {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, - {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, -] - -[[package]] -name = "wheel" -version = "0.43.0" -description = "A built-package format for Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -markers = "python_version < \"3.9\"" -files = [ - {file = "wheel-0.43.0-py3-none-any.whl", hash = "sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81"}, - {file = "wheel-0.43.0.tar.gz", hash = "sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85"}, -] - -[package.extras] -test = ["pytest (>=6.0.0)", "setuptools (>=65)"] - -[[package]] -name = "wrapt" -version = "1.16.0" -description = "Module for decorators, wrappers and monkey patching." -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, - {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, - {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, - {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, - {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, - {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, - {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = 
"sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, - {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, - {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, - {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, - {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, - {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, - {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, - {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, - {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, - {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, - {file = 
"wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, - {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, - {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, - {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, - {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, - {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, - {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, - {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, - {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, - {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, - {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, - {file = 
"wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, - {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, - {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, - {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, - {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, - {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, - {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, - {file = "wrapt-1.16.0.tar.gz", hash = 
"sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, -] - -[[package]] -name = "zipp" -version = "3.19.2" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -markers = "python_version < \"3.10\"" -files = [ - {file = "zipp-3.19.2-py3-none-any.whl", hash = "sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c"}, - {file = "zipp-3.19.2.tar.gz", hash = "sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19"}, -] - -[package.extras] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] - -[metadata] -lock-version = "2.1" -python-versions = "^3.8" -content-hash = "9d56faff9fdab823b7d51d55a8e5a3217be9ec116d3221a345c647793f272678" diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index 2a4a6d2c..00000000 --- a/pyproject.toml +++ /dev/null @@ -1,94 +0,0 @@ -[tool.black] -line-length = 120 -target-version = ['py37'] -include = '\.pyi?$' -exclude = ''' -( - /( - \.eggs # exclude a few common directories in the - | \.git # root of the project - | \.hg - | \.mypy_cache - | \.tox - | \.venv - | _build - | buck-out - | build - | dist - | launch/api_client # generated code - )/ -) -''' - -[tool.mypy] -exclude = [ - '^launch/clientlib/', - '^launch/api_client/' -] - -[tool.poetry] -name = "scale-launch" -version = "0.4.0" -description = "The official Python client library for Launch, the Data Platform for AI" -authors = ["Your Name "] -readme = "README.md" -homepage = "https://scale.com/" -repository = "https://github.com/scaleapi/launch-python-client" 
-packages = [{ include = "launch" }] - -[tool.poetry.dependencies] -python = "^3.8" -cloudpickle = "^2.0.0" -requests = "^2.25.1" -dataclasses-json = "^0.5.7" -rich = ">=12.0.0" -deprecation = "^2.1.0" -python-dateutil = "^2.8.2" -pyyaml = ">=5.3.1,<7.0.0" -typing-extensions = "^4.1.1" -frozendict = "^2.3.4" -pydantic = "^2.7" -types-frozendict = "^2.0.9" -questionary = "^1.10.0" -click = ">=7.1.2" # type: -urllib3 = ">=1.26.7" -sseclient-py = "^1.7.2" - -[tool.poetry.scripts] -scale-launch = 'launch.cli.bin:entry_point' - -[tool.poetry.group.dev.dependencies] -black = "^23.1.0" -mypy = "^1.10.1" -isort = "^5.10.1" -pylint = "^2.12.2" -pytest = "^7.1.1" -pre-commit = "^2.17.0" -coverage = "^6.3.2" -types-setuptools = "^57.4.11" -types-requests = "^2.27.13" -types-pyyaml = "^6.0.5" -requests_mock = "^1.9.3" -ruff = "^0.0.252" -mkdocs = "^1.4.2" -mkdocs-material = "^9.0.14" -mkdocs-simple-hooks = "^0.1.5" -mkdocstrings = {extras = ["python"], version = "^0.20.0"} -mdx-include = "^1.4.2" -pytest-mock = "^3.10.0" - -[tool.pytest.ini_options] -log_cli = true -log_cli_level = "INFO" - -[build-system] -requires = ["poetry-core>=1.0.0"] -build-backend = "poetry.core.masonry.api" - -[tool.ruff] -line-length = 120 -extend-select = ['Q'] -flake8-quotes = {inline-quotes = 'double', multiline-quotes = 'double'} -exclude = [ - "launch/api_client", -] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..0a87b15c --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Scale Launch","text":"

Simple, scalable, and high performance ML service deployment in python.

"},{"location":"#example","title":"Example","text":"Launch Usage
import os\nimport time\nfrom launch import LaunchClient\nfrom launch import EndpointRequest\nfrom pydantic import BaseModel\nfrom rich import print\nclass MyRequestSchema(BaseModel):\nx: int\ny: str\nclass MyResponseSchema(BaseModel):\n__root__: int\ndef my_load_predict_fn(model):\ndef returns_model_of_x_plus_len_of_y(x: int, y: str) -> int:\n\"\"\"MyRequestSchema -> MyResponseSchema\"\"\"\nassert isinstance(x, int) and isinstance(y, str)\nreturn model(x) + len(y)\nreturn returns_model_of_x_plus_len_of_y\ndef my_load_model_fn():\ndef my_model(x):\nreturn x * 2\nreturn my_model\nBUNDLE_PARAMS = {\n\"model_bundle_name\": \"test-bundle\",\n\"load_predict_fn\": my_load_predict_fn,\n\"load_model_fn\": my_load_model_fn,\n\"request_schema\": MyRequestSchema,\n\"response_schema\": MyResponseSchema,\n\"requirements\": [\"pytest==7.2.1\", \"numpy\"],  # list your requirements here\n\"pytorch_image_tag\": \"1.7.1-cuda11.0-cudnn8-runtime\",\n}\nENDPOINT_PARAMS = {\n\"endpoint_name\": \"demo-endpoint\",\n\"model_bundle\": \"test-bundle\",\n\"cpus\": 1,\n\"min_workers\": 0,\n\"endpoint_type\": \"async\",\n\"update_if_exists\": True,\n\"labels\": {\n\"team\": \"MY_TEAM\",\n\"product\": \"launch\",\n}\n}\ndef predict_on_endpoint(request: MyRequestSchema) -> MyResponseSchema:\n# Wait for the endpoint to be ready first before submitting a task\nendpoint = client.get_model_endpoint(endpoint_name=\"demo-endpoint\")\nwhile endpoint.status() != \"READY\":\ntime.sleep(10)\nendpoint_request = EndpointRequest(args=request.dict(), return_pickled=False)\nfuture = endpoint.predict(request=endpoint_request)\nraw_response = future.get()\nresponse = MyResponseSchema.parse_raw(raw_response.result)\nreturn response\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nclient.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS)\nendpoint = client.create_model_endpoint(**ENDPOINT_PARAMS)\nrequest = MyRequestSchema(x=5, y=\"hello\")\nresponse = 
predict_on_endpoint(request)\nprint(response)\n\"\"\"\nMyResponseSchema(__root__=10)\n\"\"\"\n

What's going on here:

  • First we use pydantic to define our request and response schemas, MyRequestSchema and MyResponseSchema. These schemas are used to generate the API documentation for our models.
  • Next we define the model and the load_predict_fn, which tells Launch how to load our model and how to make predictions with it. In this case, we're just returning a function that adds the length of the string y to model(x), where model doubles the integer x.
  • We then define the model bundle by specifying the load_predict_fn, the request_schema, and the response_schema. We also specify the env_params, which tell Launch environment settings like the base image to use. In this case, we're using a PyTorch image.
  • Next, we create the model endpoint, which is the API that we'll use to make predictions. We specify the model_bundle that we created above, and we specify the endpoint_type, which tells Launch whether to use a synchronous or asynchronous endpoint. In this case, we're using an asynchronous endpoint, which means that we can make predictions and return immediately with a future object. We can then use the future object to get the prediction result later.
  • Finally, we make a prediction by calling predict_on_endpoint with a MyRequestSchema object. This function first waits for the endpoint to be ready, then it submits a prediction request to the endpoint. It then waits for the prediction result and returns it.

Notice that we specified min_workers=0, meaning that the endpoint will scale down to 0 workers when it's not being used.

"},{"location":"#installation","title":"Installation","text":"

To use Scale Launch, first install it using pip:

Installation
pip install -U scale-launch\n
"},{"location":"cli/","title":"CLI","text":"

Launch comes with a CLI for listing bundles / endpoints, editing endpoints, and sending tasks to endpoints.

The CLI can be used as scale-launch ....

"},{"location":"cli/#help","title":"Help","text":"

Run scale-launch --help for more options.

scale-launch --help
    This is the command line interface (CLI) package for Scale Launch.\n\n       \u2588\u2588\u2557      \u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2557   \u2588\u2588\u2557\u2588\u2588\u2588\u2557   \u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2557  \u2588\u2588\u2557\n       \u2588\u2588\u2551     \u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2551   \u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2557  \u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255d\u2588\u2588\u2551  \u2588\u2588\u2551\n       \u2588\u2588\u2551     \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2551   \u2588\u2588\u2551\u2588\u2588\u2554\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2551     \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2551\n       \u2588\u2588\u2551     \u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2551\u2588\u2588\u2551   \u2588\u2588\u2551\u2588\u2588\u2551\u255a\u2588\u2588\u2557\u2588\u2588\u2551\u2588\u2588\u2551     \u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2551\n       \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551  \u2588\u2588\u2551\u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2551\u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551  \u2588\u2588\u2551\n       \u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d  \u255a\u2550\u255d \u255a\u2550\u2550\u2550\u2550\u2550\u255d \u255a\u2550\u255d  \u255a\u2550\u2550\u2550\u255d \u255a\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d  \u255a\u2550\u255d\n\nUsage: scale-launch [OPTIONS] COMMAND [ARGS]...\n\nOptions:\n  --help  Show this message and exit.\n\nCommands:\n  batch-jobs  Batch Jobs is a wrapper around batch jobs in Scale Launch\n  bundles     Bundles is a wrapper around model bundles in Scale Launch\n  config      Config is a wrapper around getting and setting your API key and other configuration options\n  endpoints  
 Endpoints is a wrapper around model endpoints in Scale Launch\n  tasks       Tasks is a wrapper around sending requests to endpoints\n
"},{"location":"api/client/","title":"Launch Client","text":""},{"location":"api/client/#launch.client.LaunchClient","title":"LaunchClient","text":"
LaunchClient(api_key: str, endpoint: Optional[str] = None, self_hosted: bool = False, use_path_with_custom_endpoint: bool = False)\n

Scale Launch Python Client.

Initializes a Scale Launch Client.

Parameters:

Name Type Description Default api_key str

Your Scale API key

required endpoint Optional[str]

The Scale Launch Endpoint (this should not need to be changed)

None self_hosted bool

True iff you are connecting to a self-hosted Scale Launch

False use_path_with_custom_endpoint bool

True iff you are not using the default Scale Launch endpoint but your endpoint has path routing (to SCALE_LAUNCH_VX_PATH) set up

False"},{"location":"api/client/#launch.client.LaunchClient.batch_async_request","title":"batch_async_request","text":"
batch_async_request(*, model_bundle: Union[ModelBundle, str], urls: Optional[List[str]] = None, inputs: Optional[List[Dict[str, Any]]] = None, batch_url_file_location: Optional[str] = None, serialization_format: str = 'JSON', labels: Optional[Dict[str, str]] = None, cpus: Optional[int] = None, memory: Optional[str] = None, gpus: Optional[int] = None, gpu_type: Optional[str] = None, storage: Optional[str] = None, max_workers: Optional[int] = None, per_worker: Optional[int] = None, timeout_seconds: Optional[float] = None) -> Dict[str, Any]\n

Sends a batch inference request using a given bundle. Returns a key that can be used to retrieve the results of inference at a later time.

Must have exactly one of urls or inputs passed in.

Parameters:

Name Type Description Default model_bundle Union[ModelBundle, str]

The bundle or the name of the bundle to use for inference.

required urls Optional[List[str]]

A list of urls, each pointing to a file containing model input. Must be accessible by Scale Launch, hence urls need to either be public or signedURLs.

None inputs Optional[List[Dict[str, Any]]]

A list of model inputs, if exists, we will upload the inputs and pass it in to Launch.

None batch_url_file_location Optional[str]

In self-hosted mode, the input to the batch job will be uploaded to this location if provided. Otherwise, one will be determined from bundle_location_fn()

None serialization_format str

Serialization format of output, either 'PICKLE' or 'JSON'. 'pickle' corresponds to pickling results + returning

'JSON' labels Optional[Dict[str, str]]

An optional dictionary of key/value pairs to associate with this endpoint.

None cpus Optional[int]

Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater than or equal to 1.

None memory Optional[str]

Amount of memory each worker should get, e.g. \"4Gi\", \"512Mi\", etc. This must be a positive amount of memory.

None storage Optional[str]

Amount of local ephemeral storage each worker should get, e.g. \"4Gi\", \"512Mi\", etc. This must be a positive amount of storage.

None gpus Optional[int]

Number of gpus each worker should get, e.g. 0, 1, etc.

None max_workers Optional[int]

The maximum number of workers. Must be greater than or equal to 0, and as well as greater than or equal to min_workers.

None per_worker Optional[int]

The maximum number of concurrent requests that an individual worker can service. Launch automatically scales the number of workers for the endpoint so that each worker is processing per_worker requests:

  • If the average number of concurrent requests per worker is lower than per_worker, then the number of workers will be reduced.
  • Otherwise, if the average number of concurrent requests per worker is higher than per_worker, then the number of workers will be increased to meet the elevated traffic.
None gpu_type Optional[str]

If specifying a non-zero number of gpus, this controls the type of gpu requested. Here are the supported values:

  • nvidia-tesla-t4
  • nvidia-ampere-a10
  • nvidia-hopper-h100
  • nvidia-hopper-h100-1g20g
  • nvidia-hopper-h100-3g40g
None timeout_seconds Optional[float]

The maximum amount of time (in seconds) that the batch job can take. If not specified, the server defaults to 12 hours. This includes the time required to build the endpoint and the total time required for all the individual tasks.

None

Returns:

Type Description Dict[str, Any]

A dictionary that contains job_id as a key, and the ID as the value.

"},{"location":"api/client/#launch.client.LaunchClient.cancel_fine_tune","title":"cancel_fine_tune","text":"
cancel_fine_tune(fine_tune_id: str) -> CancelFineTuneResponse\n

Cancel a fine-tune

Parameters:

Name Type Description Default fine_tune_id str

ID of the fine-tune

required

Returns:

Name Type Description CancelFineTuneResponse CancelFineTuneResponse

whether the cancellation was successful

"},{"location":"api/client/#launch.client.LaunchClient.clone_model_bundle_with_changes","title":"clone_model_bundle_with_changes","text":"
clone_model_bundle_with_changes(model_bundle: Union[ModelBundle, str], app_config: Optional[Dict] = None) -> ModelBundle\n
Warning

This method is deprecated. Use clone_model_bundle_with_changes_v2 instead.

Parameters:

Name Type Description Default model_bundle Union[ModelBundle, str]

The existing bundle or its ID.

required app_config Optional[Dict]

The new bundle's app config, if not passed in, the new bundle's app_config will be set to None

None

Returns:

Type Description ModelBundle

A ModelBundle object

"},{"location":"api/client/#launch.client.LaunchClient.clone_model_bundle_with_changes_v2","title":"clone_model_bundle_with_changes_v2","text":"
clone_model_bundle_with_changes_v2(original_model_bundle_id: str, new_app_config: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response\n

Clone a model bundle with an optional new app_config.

Parameters:

Name Type Description Default original_model_bundle_id str

The ID of the model bundle you want to clone.

required new_app_config Optional[Dict[str, Any]]

A dictionary of new app config values to use for the cloned model.

None

Returns:

Type Description CreateModelBundleV2Response

An object containing the following keys:

  • model_bundle_id: The ID of the cloned model bundle.
"},{"location":"api/client/#launch.client.LaunchClient.completions_stream","title":"completions_stream","text":"
completions_stream(endpoint_name: str, prompt: str, max_new_tokens: int, temperature: float, stop_sequences: Optional[List[str]] = None, return_token_log_probs: Optional[bool] = False, timeout: float = DEFAULT_LLM_COMPLETIONS_TIMEOUT) -> Iterable[CompletionStreamV1Response]\n

Run prompt completion on an LLM endpoint in streaming fashion. Will fail if endpoint does not support streaming.

Parameters:

Name Type Description Default endpoint_name str

The name of the LLM endpoint to make the request to

required prompt str

The prompt to send to the endpoint

required max_new_tokens int

The maximum number of tokens to generate for each prompt

required temperature float

The temperature to use for sampling

required stop_sequences Optional[List[str]]

List of sequences to stop the completion at

None return_token_log_probs Optional[bool]

Whether to return the log probabilities of the tokens

False

Returns:

Type Description Iterable[CompletionStreamV1Response]

Iterable responses for prompt completion

"},{"location":"api/client/#launch.client.LaunchClient.completions_sync","title":"completions_sync","text":"
completions_sync(endpoint_name: str, prompt: str, max_new_tokens: int, temperature: float, stop_sequences: Optional[List[str]] = None, return_token_log_probs: Optional[bool] = False, timeout: float = DEFAULT_LLM_COMPLETIONS_TIMEOUT) -> CompletionSyncV1Response\n

Run prompt completion on a sync LLM endpoint. Will fail if the endpoint is not sync.

Parameters:

Name Type Description Default endpoint_name str

The name of the LLM endpoint to make the request to

required prompt str

The completion prompt to send to the endpoint

required max_new_tokens int

The maximum number of tokens to generate for each prompt

required temperature float

The temperature to use for sampling

required stop_sequences Optional[List[str]]

List of sequences to stop the completion at

None return_token_log_probs Optional[bool]

Whether to return the log probabilities of the tokens

False

Returns:

Type Description CompletionSyncV1Response

Response for prompt completion

"},{"location":"api/client/#launch.client.LaunchClient.create_docker_image_batch_job","title":"create_docker_image_batch_job","text":"
create_docker_image_batch_job(*, labels: Dict[str, str], docker_image_batch_job_bundle: Optional[Union[str, DockerImageBatchJobBundleResponse]] = None, docker_image_batch_job_bundle_name: Optional[str] = None, job_config: Optional[Dict[str, Any]] = None, cpus: Optional[int] = None, memory: Optional[str] = None, gpus: Optional[int] = None, gpu_type: Optional[str] = None, storage: Optional[str] = None)\n

For self hosted mode only. Parameters: docker_image_batch_job_bundle: Specifies the docker image bundle to use for the batch job. Either the string id of a docker image bundle, or a DockerImageBatchJobBundleResponse object. Only one of docker_image_batch_job_bundle and docker_image_batch_job_bundle_name can be specified. docker_image_batch_job_bundle_name: The name of a batch job bundle. If specified, Launch will use the most recent bundle with that name owned by the current user. Only one of docker_image_batch_job_bundle and docker_image_batch_job_bundle_name can be specified. labels: Kubernetes labels that are present on the batch job. job_config: A JSON-serializable python object that will get passed to the batch job, specifically as the contents of a file mounted at mount_location inside the bundle. You can call python's json.load() on the file to retrieve the contents. cpus: Optional override for the number of cpus to give to your job. Either the default must be specified in the bundle, or this must be specified. memory: Optional override for the amount of memory to give to your job. Either the default must be specified in the bundle, or this must be specified. gpus: Optional number of gpus to give to the bundle. If not specified in the bundle or here, will be interpreted as 0 gpus. gpu_type: Optional type of gpu. If the final number of gpus is positive, must be specified either in the bundle or here. storage: Optional reserved amount of disk to give to your batch job. If not specified, your job may be evicted if it is using too much disk.

"},{"location":"api/client/#launch.client.LaunchClient.create_docker_image_batch_job_bundle","title":"create_docker_image_batch_job_bundle","text":"
create_docker_image_batch_job_bundle(*, name: str, image_repository: str, image_tag: str, command: List[str], env: Optional[Dict[str, str]] = None, mount_location: Optional[str] = None, cpus: Optional[int] = None, memory: Optional[str] = None, gpus: Optional[int] = None, gpu_type: Optional[str] = None, storage: Optional[str] = None) -> CreateDockerImageBatchJobBundleResponse\n

For self hosted mode only.

Creates a Docker Image Batch Job Bundle.

Parameters:

Name Type Description Default name str

A user-defined name for the bundle. Does not need to be unique.

required image_repository str

The (short) repository of your image. For example, if your image is located at 123456789012.dkr.ecr.us-west-2.amazonaws.com/repo:tag, and your version of Launch is configured to look at 123456789012.dkr.ecr.us-west-2.amazonaws.com for Docker Images, you would pass the value repo for the image_repository parameter.

required image_tag str

The tag of your image inside of the repo. In the example above, you would pass the value tag for the image_tag parameter.

required command List[str]

The command to run inside the docker image.

required env Optional[Dict[str, str]]

A dictionary of environment variables to inject into your docker image.

None mount_location Optional[str]

A location in the filesystem where you would like a json-formatted file, controllable on runtime, to be mounted. This allows behavior to be specified on runtime. (Specifically, the contents of this file can be read via json.load() inside of the user-defined code.)

None cpus Optional[int]

Optional default value for the number of cpus to give the job.

None memory Optional[str]

Optional default value for the amount of memory to give the job.

None gpus Optional[int]

Optional default value for the number of gpus to give the job.

None gpu_type Optional[str]

Optional default value for the type of gpu to give the job.

None storage Optional[str]

Optional default value for the amount of disk to give the job.

None"},{"location":"api/client/#launch.client.LaunchClient.create_fine_tune","title":"create_fine_tune","text":"
create_fine_tune(model: str, training_file: str, validation_file: Optional[str] = None, fine_tuning_method: Optional[str] = None, hyperparameters: Optional[Dict[str, str]] = None, wandb_config: Optional[Dict[str, Any]] = None, suffix: str = None) -> CreateFineTuneResponse\n

Create a fine-tune

Parameters:

Name Type Description Default model str

Identifier of base model to train from.

required training_file str

Path to file of training dataset. Dataset must be a csv with columns 'prompt' and 'response'.

required validation_file Optional[str]

Path to file of validation dataset. Has the same format as training_file. If not provided, we will generate a split from the training dataset.

None fine_tuning_method Optional[str]

Fine-tuning method. Currently unused, but when different techniques are implemented we will expose this field.

None hyperparameters Optional[Dict[str, str]]

Hyperparameters to pass in to training job.

None wandb_config Optional[Dict[str, Any]]

Configuration for Weights and Biases. To enable set hyperparameters[\"report_to\"] to wandb. api_key must be provided which is the API key.

None suffix str

Optional user-provided identifier suffix for the fine-tuned model.

None

Returns:

Name Type Description CreateFineTuneResponse CreateFineTuneResponse

ID of the created fine-tune

"},{"location":"api/client/#launch.client.LaunchClient.create_llm_model_endpoint","title":"create_llm_model_endpoint","text":"
create_llm_model_endpoint(endpoint_name: str, model_name: str, inference_framework_image_tag: str, source: LLMSource = LLMSource.HUGGING_FACE, inference_framework: LLMInferenceFramework = LLMInferenceFramework.DEEPSPEED, num_shards: int = 4, quantize: Optional[Quantization] = None, checkpoint_path: Optional[str] = None, cpus: int = 32, memory: str = '192Gi', storage: Optional[str] = None, gpus: int = 4, min_workers: int = 0, max_workers: int = 1, per_worker: int = 10, gpu_type: Optional[str] = 'nvidia-ampere-a10', endpoint_type: str = 'sync', high_priority: Optional[bool] = False, post_inference_hooks: Optional[List[PostInferenceHooks]] = None, default_callback_url: Optional[str] = None, default_callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, default_callback_auth_username: Optional[str] = None, default_callback_auth_password: Optional[str] = None, default_callback_auth_cert: Optional[str] = None, default_callback_auth_key: Optional[str] = None, public_inference: Optional[bool] = None, update_if_exists: bool = False, labels: Optional[Dict[str, str]] = None)\n

Creates and registers a model endpoint in Scale Launch. The returned object is an instance of type Endpoint, which is a base class of either SyncEndpoint or AsyncEndpoint. This is the object to which you send inference requests.

Parameters:

Name Type Description Default endpoint_name str

The name of the model endpoint you want to create. The name must be unique across all endpoints that you own.

required model_name str

name for the LLM. List can be found at (TODO: add list of supported models)

required inference_framework_image_tag str

image tag for the inference framework. (TODO: use latest image tag when unspecified)

required source LLMSource

source of the LLM. Currently only HuggingFace is supported.

HUGGING_FACE inference_framework LLMInferenceFramework

inference framework for the LLM. Currently only DeepSpeed is supported.

DEEPSPEED num_shards int

number of shards for the LLM. When bigger than 1, LLM will be sharded to multiple GPUs. Number of GPUs must be larger than num_shards.

4 quantize Optional[Quantization]

Quantization method for the LLM. Only affects behavior for text-generation-inference models.

None checkpoint_path Optional[str]

Path to the checkpoint to load the model from. Only affects behavior for text-generation-inference models.

None cpus int

Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater than or equal to 1.

32 memory str

Amount of memory each worker should get, e.g. \"4Gi\", \"512Mi\", etc. This must be a positive amount of memory.

'192Gi' storage Optional[str]

Amount of local ephemeral storage each worker should get, e.g. \"4Gi\", \"512Mi\", etc. This must be a positive amount of storage.

None gpus int

Number of gpus each worker should get, e.g. 0, 1, etc.

4 min_workers int

The minimum number of workers. Must be greater than or equal to 0. This should be determined by computing the minimum throughput of your workload and dividing it by the throughput of a single worker. This field must be at least 1 for synchronous endpoints.

0 max_workers int

The maximum number of workers. Must be greater than or equal to 0, as well as greater than or equal to min_workers. This should be determined by computing the maximum throughput of your workload and dividing it by the throughput of a single worker.

1 per_worker int

The maximum number of concurrent requests that an individual worker can service. Launch automatically scales the number of workers for the endpoint so that each worker is processing per_worker requests, subject to the limits defined by min_workers and max_workers.

  • If the average number of concurrent requests per worker is lower than per_worker, then the number of workers will be reduced. - Otherwise, if the average number of concurrent requests per worker is higher than per_worker, then the number of workers will be increased to meet the elevated traffic.

Here is our recommendation for computing per_worker:

  1. Compute min_workers and max_workers per your minimum and maximum throughput requirements. 2. Determine a value for the maximum number of concurrent requests in the workload. Divide this number by max_workers. Doing this ensures that the number of workers will \"climb\" to max_workers.
10 gpu_type Optional[str]

If specifying a non-zero number of gpus, this controls the type of gpu requested. Here are the supported values:

  • nvidia-tesla-t4
  • nvidia-ampere-a10
  • nvidia-hopper-h100
  • nvidia-hopper-h100-1g20g
  • nvidia-hopper-h100-3g40g
'nvidia-ampere-a10' endpoint_type str

Either \"sync\" or \"async\".

'sync' high_priority Optional[bool]

Either True or False. Enabling this will allow the created endpoint to leverage the shared pool of prewarmed nodes for faster spinup time.

False post_inference_hooks Optional[List[PostInferenceHooks]]

List of hooks to trigger after inference tasks are served.

None default_callback_url Optional[str]

The default callback url to use for async endpoints. This can be overridden in the task parameters for each individual task. post_inference_hooks must contain \"callback\" for the callback to be triggered.

None default_callback_auth_kind Optional[Literal['basic', 'mtls']]

The default callback auth kind to use for async endpoints. Either \"basic\" or \"mtls\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_username Optional[str]

The default callback auth username to use. This only applies if default_callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_password Optional[str]

The default callback auth password to use. This only applies if default_callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_cert Optional[str]

The default callback auth cert to use. This only applies if default_callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_key Optional[str]

The default callback auth key to use. This only applies if default_callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None public_inference Optional[bool]

If True, this endpoint will be available to all user IDs for inference.

None update_if_exists bool

If True, will attempt to update the endpoint if it exists. Otherwise, will unconditionally try to create a new endpoint. Note that endpoint names for a given user must be unique, so attempting to call this function with update_if_exists=False for an existing endpoint will raise an error.

False labels Optional[Dict[str, str]]

An optional dictionary of key/value pairs to associate with this endpoint.

None

Returns:

Type Description

A Endpoint object that can be used to make requests to the endpoint.

"},{"location":"api/client/#launch.client.LaunchClient.create_model_bundle","title":"create_model_bundle","text":"
create_model_bundle(model_bundle_name: str, env_params: Dict[str, str], *, load_predict_fn: Optional[Callable[[LaunchModel_T], Callable[[Any], Any]]] = None, predict_fn_or_cls: Optional[Callable[[Any], Any]] = None, requirements: Optional[List[str]] = None, model: Optional[LaunchModel_T] = None, load_model_fn: Optional[Callable[[], LaunchModel_T]] = None, app_config: Optional[Union[Dict[str, Any], str]] = None, globals_copy: Optional[Dict[str, Any]] = None, request_schema: Optional[Type[BaseModel]] = None, response_schema: Optional[Type[BaseModel]] = None) -> ModelBundle\n
Warning

This method is deprecated. Use create_model_bundle_from_callable_v2 instead.

Parameters:

Name Type Description Default model_bundle_name str

The name of the model bundle you want to create. The name must be unique across all bundles that you own.

required predict_fn_or_cls Optional[Callable[[Any], Any]]

Function or a Callable class that runs end-to-end (pre/post processing and model inference) on the call. i.e. predict_fn_or_cls(REQUEST) -> RESPONSE.

None model Optional[LaunchModel_T]

Typically a trained Neural Network, e.g. a Pytorch module.

Exactly one of model and load_model_fn must be provided.

None load_model_fn Optional[Callable[[], LaunchModel_T]]

A function that, when run, loads a model. This function is essentially a deferred wrapper around the model argument.

Exactly one of model and load_model_fn must be provided.

None load_predict_fn Optional[Callable[[LaunchModel_T], Callable[[Any], Any]]]

Function that, when called with a model, returns a function that carries out inference.

If model is specified, then this is equivalent to: load_predict_fn(model, app_config=optional_app_config]) -> predict_fn

Otherwise, if load_model_fn is specified, then this is equivalent to: load_predict_fn(load_model_fn(), app_config=optional_app_config]) -> predict_fn

In both cases, predict_fn is then the inference function, i.e.: predict_fn(REQUEST) -> RESPONSE

None requirements Optional[List[str]]

A list of python package requirements, where each list element is of the form <package_name>==<package_version>, e.g.

[\"tensorflow==2.3.0\", \"tensorflow-hub==0.11.0\"]

If you do not pass in a value for requirements, then you must pass in globals() for the globals_copy argument.

None app_config Optional[Union[Dict[str, Any], str]]

Either a Dictionary that represents a YAML file contents or a local path to a YAML file.

None env_params Dict[str, str]

A dictionary that dictates environment information e.g. the use of pytorch or tensorflow, which base image tag to use, etc. Specifically, the dictionary should contain the following keys:

  • framework_type: either tensorflow or pytorch. - PyTorch fields: - pytorch_image_tag: An image tag for the pytorch docker base image. The list of tags can be found from https://hub.docker.com/r/pytorch/pytorch/tags. - Example:

    .. code-block:: python

    { \"framework_type\": \"pytorch\", \"pytorch_image_tag\": \"1.10.0-cuda11.3-cudnn8-runtime\" }

  • Tensorflow fields:

    • tensorflow_version: Version of tensorflow, e.g. \"2.3.0\".
required globals_copy Optional[Dict[str, Any]]

Dictionary of the global symbol table. Normally provided by globals() built-in function.

None request_schema Optional[Type[BaseModel]]

A pydantic model that represents the request schema for the model bundle. This is used to validate the request body for the model bundle's endpoint.

None response_schema Optional[Type[BaseModel]]

A pydantic model that represents the response schema for the model bundle. This is used to validate the response for the model bundle's endpoint. Note: If request_schema is specified, then response_schema must also be specified.

None"},{"location":"api/client/#launch.client.LaunchClient.create_model_bundle_from_callable_v2","title":"create_model_bundle_from_callable_v2","text":"
create_model_bundle_from_callable_v2(*, model_bundle_name: str, load_predict_fn: Callable[[LaunchModel_T], Callable[[Any], Any]], load_model_fn: Callable[[], LaunchModel_T], request_schema: Type[BaseModel], response_schema: Type[BaseModel], requirements: Optional[List[str]] = None, pytorch_image_tag: Optional[str] = None, tensorflow_version: Optional[str] = None, custom_base_image_repository: Optional[str] = None, custom_base_image_tag: Optional[str] = None, app_config: Optional[Union[Dict[str, Any], str]] = None, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response\n

Uploads and registers a model bundle to Scale Launch.

Parameters:

Name Type Description Default model_bundle_name str

Name of the model bundle.

required load_predict_fn Callable[[LaunchModel_T], Callable[[Any], Any]]

Function that takes in a model and returns a predict function. When your model bundle is deployed, this predict function will be called as follows:

input = {\"input\": \"some input\"} # or whatever your request schema is.\n\ndef load_model_fn():\n    # load model\n    return model\n\ndef load_predict_fn(model, app_config=None):\n    def predict_fn(input):\n        # do pre-processing\n        output = model(input)\n        # do post-processing\n        return output\n    return predict_fn\n\npredict_fn = load_predict_fn(load_model_fn(), app_config=optional_app_config)\nresponse = predict_fn(input)\n

required load_model_fn Callable[[], LaunchModel_T]

A function that, when run, loads a model.

required request_schema Type[BaseModel]

A pydantic model that represents the request schema for the model bundle. This is used to validate the request body for the model bundle's endpoint.

required response_schema Type[BaseModel]

A pydantic model that represents the response schema for the model bundle. This is used to validate the response for the model bundle's endpoint.

required requirements Optional[List[str]]

List of pip requirements.

None pytorch_image_tag Optional[str]

The image tag for the PyTorch image that will be used to run the bundle. Exactly one of pytorch_image_tag, tensorflow_version, or custom_base_image_repository must be specified.

None tensorflow_version Optional[str]

The version of TensorFlow that will be used to run the bundle. If not specified, the default version will be used. Exactly one of pytorch_image_tag, tensorflow_version, or custom_base_image_repository must be specified.

None custom_base_image_repository Optional[str]

The repository for a custom base image that will be used to run the bundle. If not specified, the default base image will be used. Exactly one of pytorch_image_tag, tensorflow_version, or custom_base_image_repository must be specified.

None custom_base_image_tag Optional[str]

The tag for a custom base image that will be used to run the bundle. Must be specified if custom_base_image_repository is specified.

None app_config Optional[Union[Dict[str, Any], str]]

An optional dictionary of configuration values that will be passed to the bundle when it is run. These values can be accessed by the bundle via the app_config global variable.

None metadata Optional[Dict[str, Any]]

Metadata to record with the bundle.

None

Returns:

Type Description CreateModelBundleV2Response

An object containing the following keys:

  • model_bundle_id: The ID of the created model bundle.
"},{"location":"api/client/#launch.client.LaunchClient.create_model_bundle_from_dirs","title":"create_model_bundle_from_dirs","text":"
create_model_bundle_from_dirs(*, model_bundle_name: str, base_paths: List[str], requirements_path: str, env_params: Dict[str, str], load_predict_fn_module_path: str, load_model_fn_module_path: str, app_config: Optional[Union[Dict[str, Any], str]] = None, request_schema: Optional[Type[BaseModel]] = None, response_schema: Optional[Type[BaseModel]] = None) -> ModelBundle\n
Warning

This method is deprecated. Use create_model_bundle_from_dirs_v2 instead.

Parameters:

Name Type Description Default model_bundle_name str

The name of the model bundle you want to create. The name must be unique across all bundles that you own.

required base_paths List[str]

The paths on the local filesystem where the bundle code lives.

required requirements_path str

A path on the local filesystem where a requirements.txt file lives.

required env_params Dict[str, str]

A dictionary that dictates environment information e.g. the use of pytorch or tensorflow, which base image tag to use, etc. Specifically, the dictionary should contain the following keys:

  • framework_type: either tensorflow or pytorch.
  • PyTorch fields:
    • pytorch_image_tag: An image tag for the pytorch docker base image. The list of tags can be found from https://hub.docker.com/r/pytorch/pytorch/tags

Example:

{\n\"framework_type\": \"pytorch\",\n\"pytorch_image_tag\": \"1.10.0-cuda11.3-cudnn8-runtime\",\n}\n

required load_predict_fn_module_path str

A python module path for a function that, when called with the output of load_model_fn_module_path, returns a function that carries out inference.

required load_model_fn_module_path str

A python module path for a function that returns a model. The output feeds into the function located at load_predict_fn_module_path.

required app_config Optional[Union[Dict[str, Any], str]]

Either a Dictionary that represents a YAML file contents or a local path to a YAML file.

None request_schema Optional[Type[BaseModel]]

A pydantic model that represents the request schema for the model bundle. This is used to validate the request body for the model bundle's endpoint.

None response_schema Optional[Type[BaseModel]]

A pydantic model that represents the response schema for the model bundle. This is used to validate the response for the model bundle's endpoint. Note: If request_schema is specified, then response_schema must also be specified.

None"},{"location":"api/client/#launch.client.LaunchClient.create_model_bundle_from_dirs_v2","title":"create_model_bundle_from_dirs_v2","text":"
create_model_bundle_from_dirs_v2(*, model_bundle_name: str, base_paths: List[str], load_predict_fn_module_path: str, load_model_fn_module_path: str, request_schema: Type[BaseModel], response_schema: Type[BaseModel], requirements_path: Optional[str] = None, pytorch_image_tag: Optional[str] = None, tensorflow_version: Optional[str] = None, custom_base_image_repository: Optional[str] = None, custom_base_image_tag: Optional[str] = None, app_config: Optional[Dict[str, Any]] = None, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response\n

Packages up code from one or more local filesystem folders and uploads them as a bundle to Scale Launch. In this mode, a bundle is just local code instead of a serialized object.

For example, if you have a directory structure like so, and your current working directory is my_root:

   my_root/\n       my_module1/\n           __init__.py\n           ...files and directories\n           my_inference_file.py\n       my_module2/\n           __init__.py\n           ...files and directories\n

then calling create_model_bundle_from_dirs_v2 with base_paths=[\"my_module1\", \"my_module2\"] essentially creates a zip file without the root directory, e.g.:

   my_module1/\n       __init__.py\n       ...files and directories\n       my_inference_file.py\n   my_module2/\n       __init__.py\n       ...files and directories\n

and these contents will be unzipped relative to the server side application root. Bear these points in mind when referencing Python module paths for this bundle. For instance, if my_inference_file.py has def f(...) as the desired inference loading function, then the load_predict_fn_module_path argument should be my_module1.my_inference_file.f.

Parameters:

Name Type Description Default model_bundle_name str

The name of the model bundle you want to create.

required base_paths List[str]

A list of paths to directories that will be zipped up and uploaded as a bundle. Each path must be relative to the current working directory.

required load_predict_fn_module_path str

The Python module path to the function that will be used to load the model for inference. This function should take in a path to a model directory, and return a model object. The model object should be pickleable.

required load_model_fn_module_path str

The Python module path to the function that will be used to load the model for training. This function should take in a path to a model directory, and return a model object. The model object should be pickleable.

required request_schema Type[BaseModel]

A Pydantic model that defines the request schema for the bundle.

required response_schema Type[BaseModel]

A Pydantic model that defines the response schema for the bundle.

required requirements_path Optional[str]

Path to a requirements.txt file that will be used to install dependencies for the bundle. This file must be relative to the current working directory.

None pytorch_image_tag Optional[str]

The image tag for the PyTorch image that will be used to run the bundle. Exactly one of pytorch_image_tag, tensorflow_version, or custom_base_image_repository must be specified.

None tensorflow_version Optional[str]

The version of TensorFlow that will be used to run the bundle. If not specified, the default version will be used. Exactly one of pytorch_image_tag, tensorflow_version, or custom_base_image_repository must be specified.

None custom_base_image_repository Optional[str]

The repository for a custom base image that will be used to run the bundle. If not specified, the default base image will be used. Exactly one of pytorch_image_tag, tensorflow_version, or custom_base_image_repository must be specified.

None custom_base_image_tag Optional[str]

The tag for a custom base image that will be used to run the bundle. Must be specified if custom_base_image_repository is specified.

None app_config Optional[Dict[str, Any]]

An optional dictionary of configuration values that will be passed to the bundle when it is run. These values can be accessed by the bundle via the app_config global variable.

None metadata Optional[Dict[str, Any]]

Metadata to record with the bundle.

None

Returns:

Type Description CreateModelBundleV2Response

An object containing the following keys:

  • model_bundle_id: The ID of the created model bundle.
"},{"location":"api/client/#launch.client.LaunchClient.create_model_bundle_from_runnable_image_v2","title":"create_model_bundle_from_runnable_image_v2","text":"
create_model_bundle_from_runnable_image_v2(*, model_bundle_name: str, request_schema: Type[BaseModel], response_schema: Type[BaseModel], repository: str, tag: str, command: List[str], healthcheck_route: Optional[str] = None, predict_route: Optional[str] = None, env: Dict[str, str], readiness_initial_delay_seconds: int, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response\n

Create a model bundle from a runnable image. The specified command must start a process that will listen for requests on port 5005 using HTTP.

Inference requests must be served at the POST /predict route while the GET /readyz route is a healthcheck.

Parameters:

Name Type Description Default model_bundle_name str

The name of the model bundle you want to create.

required request_schema Type[BaseModel]

A Pydantic model that defines the request schema for the bundle.

required response_schema Type[BaseModel]

A Pydantic model that defines the response schema for the bundle.

required repository str

The name of the Docker repository for the runnable image.

required tag str

The tag for the runnable image.

required command List[str]

The command that will be used to start the process that listens for requests.

required predict_route Optional[str]

The endpoint route on the runnable image that will be called.

None healthcheck_route Optional[str]

The healthcheck endpoint route on the runnable image.

None env Dict[str, str]

A dictionary of environment variables that will be passed to the bundle when it is run.

required readiness_initial_delay_seconds int

The number of seconds to wait for the HTTP server to become ready and successfully respond on its healthcheck.

required metadata Optional[Dict[str, Any]]

Metadata to record with the bundle.

None

Returns:

Type Description CreateModelBundleV2Response

An object containing the following keys:

  • model_bundle_id: The ID of the created model bundle.
"},{"location":"api/client/#launch.client.LaunchClient.create_model_bundle_from_streaming_enhanced_runnable_image_v2","title":"create_model_bundle_from_streaming_enhanced_runnable_image_v2","text":"
create_model_bundle_from_streaming_enhanced_runnable_image_v2(*, model_bundle_name: str, request_schema: Type[BaseModel], response_schema: Type[BaseModel], repository: str, tag: str, command: Optional[List[str]] = None, healthcheck_route: Optional[str] = None, predict_route: Optional[str] = None, streaming_command: List[str], streaming_predict_route: Optional[str] = None, env: Dict[str, str], readiness_initial_delay_seconds: int, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response\n

Create a model bundle from a runnable image. The specified command must start a process that will listen for requests on port 5005 using HTTP.

Inference requests must be served at the POST /predict route while the GET /readyz route is a healthcheck.

Parameters:

Name Type Description Default model_bundle_name str

The name of the model bundle you want to create.

required request_schema Type[BaseModel]

A Pydantic model that defines the request schema for the bundle.

required response_schema Type[BaseModel]

A Pydantic model that defines the response schema for the bundle.

required repository str

The name of the Docker repository for the runnable image.

required tag str

The tag for the runnable image.

required command Optional[List[str]]

The command that will be used to start the process that listens for requests if this bundle is used as a SYNC or ASYNC endpoint.

None healthcheck_route Optional[str]

The healthcheck endpoint route on the runnable image.

None predict_route Optional[str]

The endpoint route on the runnable image that will be called if this bundle is used as a SYNC or ASYNC endpoint.

None streaming_command List[str]

The command that will be used to start the process that listens for requests if this bundle is used as a STREAMING endpoint.

required streaming_predict_route Optional[str]

The endpoint route on the runnable image that will be called if this bundle is used as a STREAMING endpoint.

None env Dict[str, str]

A dictionary of environment variables that will be passed to the bundle when it is run.

required readiness_initial_delay_seconds int

The number of seconds to wait for the HTTP server to become ready and successfully respond on its healthcheck.

required metadata Optional[Dict[str, Any]]

Metadata to record with the bundle.

None

Returns:

Type Description CreateModelBundleV2Response

An object containing the following keys:

  • model_bundle_id: The ID of the created model bundle.
"},{"location":"api/client/#launch.client.LaunchClient.create_model_bundle_from_triton_enhanced_runnable_image_v2","title":"create_model_bundle_from_triton_enhanced_runnable_image_v2","text":"
create_model_bundle_from_triton_enhanced_runnable_image_v2(*, model_bundle_name: str, request_schema: Type[BaseModel], response_schema: Type[BaseModel], repository: str, tag: str, command: List[str], healthcheck_route: Optional[str] = None, predict_route: Optional[str] = None, env: Dict[str, str], readiness_initial_delay_seconds: int, triton_model_repository: str, triton_model_replicas: Optional[Dict[str, str]] = None, triton_num_cpu: float, triton_commit_tag: str, triton_storage: Optional[str] = None, triton_memory: Optional[str] = None, triton_readiness_initial_delay_seconds: int, metadata: Optional[Dict[str, Any]] = None) -> CreateModelBundleV2Response\n

Create a model bundle from a runnable image and a tritonserver image.

Same requirements as :param:create_model_bundle_from_runnable_image_v2 with additional constraints necessary for configuring tritonserver's execution.

Parameters:

Name Type Description Default model_bundle_name str

The name of the model bundle you want to create.

required request_schema Type[BaseModel]

A Pydantic model that defines the request schema for the bundle.

required response_schema Type[BaseModel]

A Pydantic model that defines the response schema for the bundle.

required repository str

The name of the Docker repository for the runnable image.

required tag str

The tag for the runnable image.

required command List[str]

The command that will be used to start the process that listens for requests.

required predict_route Optional[str]

The endpoint route on the runnable image that will be called.

None healthcheck_route Optional[str]

The healthcheck endpoint route on the runnable image.

None env Dict[str, str]

A dictionary of environment variables that will be passed to the bundle when it is run.

required readiness_initial_delay_seconds int

The number of seconds to wait for the HTTP server to become ready and successfully respond on its healthcheck.

required triton_model_repository str

The S3 prefix that contains the contents of the model repository, formatted according to https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_repository.md

required triton_model_replicas Optional[Dict[str, str]]

If supplied, the name and number of replicas to make for each model.

None triton_num_cpu float

Number of CPUs, fractional, to allocate to tritonserver.

required triton_commit_tag str

The image tag of the specific tritonserver version.

required triton_storage Optional[str]

Amount of storage space to allocate for the tritonserver container.

None triton_memory Optional[str]

Amount of memory to allocate for the tritonserver container.

None triton_readiness_initial_delay_seconds int

Like readiness_initial_delay_seconds, but for tritonserver's own healthcheck.

required metadata Optional[Dict[str, Any]]

Metadata to record with the bundle.

None

Returns:

Type Description CreateModelBundleV2Response

An object containing the following keys:

  • model_bundle_id: The ID of the created model bundle.
"},{"location":"api/client/#launch.client.LaunchClient.create_model_endpoint","title":"create_model_endpoint","text":"
create_model_endpoint(*, endpoint_name: str, model_bundle: Union[ModelBundle, str], cpus: int = 3, memory: str = '8Gi', storage: str = '16Gi', gpus: int = 0, min_workers: int = 1, max_workers: int = 1, per_worker: int = 10, gpu_type: Optional[str] = None, endpoint_type: str = 'sync', high_priority: Optional[bool] = False, post_inference_hooks: Optional[List[PostInferenceHooks]] = None, default_callback_url: Optional[str] = None, default_callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, default_callback_auth_username: Optional[str] = None, default_callback_auth_password: Optional[str] = None, default_callback_auth_cert: Optional[str] = None, default_callback_auth_key: Optional[str] = None, public_inference: Optional[bool] = None, update_if_exists: bool = False, labels: Optional[Dict[str, str]] = None) -> Optional[Endpoint]\n

Creates and registers a model endpoint in Scale Launch. The returned object is an instance of type Endpoint, which is a base class of either SyncEndpoint or AsyncEndpoint. This is the object to which you send inference requests.

Parameters:

Name Type Description Default endpoint_name str

The name of the model endpoint you want to create. The name must be unique across all endpoints that you own.

required model_bundle Union[ModelBundle, str]

The ModelBundle that the endpoint should serve.

required cpus int

Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater than or equal to 1.

3 memory str

Amount of memory each worker should get, e.g. \"4Gi\", \"512Mi\", etc. This must be a positive amount of memory.

'8Gi' storage str

Amount of local ephemeral storage each worker should get, e.g. \"4Gi\", \"512Mi\", etc. This must be a positive amount of storage.

'16Gi' gpus int

Number of gpus each worker should get, e.g. 0, 1, etc.

0 min_workers int

The minimum number of workers. Must be greater than or equal to 0. This should be determined by computing the minimum throughput of your workload and dividing it by the throughput of a single worker. This field must be at least 1 for synchronous endpoints.

1 max_workers int

The maximum number of workers. Must be greater than or equal to 0, as well as greater than or equal to min_workers. This should be determined by computing the maximum throughput of your workload and dividing it by the throughput of a single worker.

1 per_worker int

The maximum number of concurrent requests that an individual worker can service. Launch automatically scales the number of workers for the endpoint so that each worker is processing per_worker requests, subject to the limits defined by min_workers and max_workers.

  • If the average number of concurrent requests per worker is lower than per_worker, then the number of workers will be reduced. - Otherwise, if the average number of concurrent requests per worker is higher than per_worker, then the number of workers will be increased to meet the elevated traffic.

Here is our recommendation for computing per_worker:

  1. Compute min_workers and max_workers per your minimum and maximum throughput requirements. 2. Determine a value for the maximum number of concurrent requests in the workload. Divide this number by max_workers. Doing this ensures that the number of workers will \"climb\" to max_workers.
10 gpu_type Optional[str]

If specifying a non-zero number of gpus, this controls the type of gpu requested. Here are the supported values:

  • nvidia-tesla-t4
  • nvidia-ampere-a10
  • nvidia-hopper-h100
  • nvidia-hopper-h100-1g20g
  • nvidia-hopper-h100-3g40g
None endpoint_type str

Either \"sync\", \"async\", or \"streaming\".

'sync' high_priority Optional[bool]

Either True or False. Enabling this will allow the created endpoint to leverage the shared pool of prewarmed nodes for faster spinup time.

False post_inference_hooks Optional[List[PostInferenceHooks]]

List of hooks to trigger after inference tasks are served.

None default_callback_url Optional[str]

The default callback url to use for async endpoints. This can be overridden in the task parameters for each individual task. post_inference_hooks must contain \"callback\" for the callback to be triggered.

None default_callback_auth_kind Optional[Literal['basic', 'mtls']]

The default callback auth kind to use for async endpoints. Either \"basic\" or \"mtls\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_username Optional[str]

The default callback auth username to use. This only applies if default_callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_password Optional[str]

The default callback auth password to use. This only applies if default_callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_cert Optional[str]

The default callback auth cert to use. This only applies if default_callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_key Optional[str]

The default callback auth key to use. This only applies if default_callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None public_inference Optional[bool]

If True, this endpoint will be available to all user IDs for inference.

None update_if_exists bool

If True, will attempt to update the endpoint if it exists. Otherwise, will unconditionally try to create a new endpoint. Note that endpoint names for a given user must be unique, so attempting to call this function with update_if_exists=False for an existing endpoint will raise an error.

False labels Optional[Dict[str, str]]

An optional dictionary of key/value pairs to associate with this endpoint.

None

Returns:

Type Description Optional[Endpoint]

An Endpoint object that can be used to make requests to the endpoint.

"},{"location":"api/client/#launch.client.LaunchClient.delete_file","title":"delete_file","text":"
delete_file(file_id: str) -> DeleteFileResponse\n

Delete a file

Parameters:

Name Type Description Default file_id str

ID of the file

required

Returns:

Name Type Description DeleteFileResponse DeleteFileResponse

whether the deletion was successful

"},{"location":"api/client/#launch.client.LaunchClient.delete_llm_model_endpoint","title":"delete_llm_model_endpoint","text":"
delete_llm_model_endpoint(model_endpoint_name: str) -> bool\n

Deletes an LLM model endpoint.

Parameters:

Name Type Description Default model_endpoint_name str

The name of the model endpoint to delete.

required"},{"location":"api/client/#launch.client.LaunchClient.delete_model_endpoint","title":"delete_model_endpoint","text":"
delete_model_endpoint(model_endpoint_name: str)\n

Deletes a model endpoint.

Parameters:

Name Type Description Default model_endpoint

A ModelEndpoint object.

required"},{"location":"api/client/#launch.client.LaunchClient.edit_model_endpoint","title":"edit_model_endpoint","text":"
edit_model_endpoint(*, model_endpoint: Union[ModelEndpoint, str], model_bundle: Optional[Union[ModelBundle, str]] = None, cpus: Optional[float] = None, memory: Optional[str] = None, storage: Optional[str] = None, gpus: Optional[int] = None, min_workers: Optional[int] = None, max_workers: Optional[int] = None, per_worker: Optional[int] = None, gpu_type: Optional[str] = None, high_priority: Optional[bool] = None, post_inference_hooks: Optional[List[PostInferenceHooks]] = None, default_callback_url: Optional[str] = None, default_callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, default_callback_auth_username: Optional[str] = None, default_callback_auth_password: Optional[str] = None, default_callback_auth_cert: Optional[str] = None, default_callback_auth_key: Optional[str] = None, public_inference: Optional[bool] = None) -> None\n

Edits an existing model endpoint. Here are the fields that cannot be edited on an existing endpoint:

  • The endpoint's name. The endpoint's type (i.e. you cannot go from a SyncEndpoint to an AsyncEndpoint or vice versa).

Parameters:

Name Type Description Default model_endpoint Union[ModelEndpoint, str]

The model endpoint (or its name) you want to edit. The name must be unique across all endpoints that you own.

required model_bundle Optional[Union[ModelBundle, str]]

The ModelBundle that the endpoint should serve.

None cpus Optional[float]

Number of cpus each worker should get, e.g. 1, 2, etc. This must be greater than or equal to 1.

None memory Optional[str]

Amount of memory each worker should get, e.g. \"4Gi\", \"512Mi\", etc. This must be a positive amount of memory.

None storage Optional[str]

Amount of local ephemeral storage each worker should get, e.g. \"4Gi\", \"512Mi\", etc. This must be a positive amount of storage.

None gpus Optional[int]

Number of gpus each worker should get, e.g. 0, 1, etc.

None min_workers Optional[int]

The minimum number of workers. Must be greater than or equal to 0.

None max_workers Optional[int]

The maximum number of workers. Must be greater than or equal to 0, and as well as greater than or equal to min_workers.

None per_worker Optional[int]

The maximum number of concurrent requests that an individual worker can service. Launch automatically scales the number of workers for the endpoint so that each worker is processing per_worker requests:

  • If the average number of concurrent requests per worker is lower than per_worker, then the number of workers will be reduced. Otherwise, if the average number of concurrent requests per worker is higher than per_worker, then the number of workers will be increased to meet the elevated traffic.
None gpu_type Optional[str]

If specifying a non-zero number of gpus, this controls the type of gpu requested. Here are the supported values:

  • nvidia-tesla-t4
  • nvidia-ampere-a10
  • nvidia-hopper-h100
  • nvidia-hopper-h100-1g20g
  • nvidia-hopper-h100-3g40g
None high_priority Optional[bool]

Either True or False. Enabling this will allow the created endpoint to leverage the shared pool of prewarmed nodes for faster spinup time.

None post_inference_hooks Optional[List[PostInferenceHooks]]

List of hooks to trigger after inference tasks are served.

None default_callback_url Optional[str]

The default callback url to use for async endpoints. This can be overridden in the task parameters for each individual task. post_inference_hooks must contain \"callback\" for the callback to be triggered.

None default_callback_auth_kind Optional[Literal['basic', 'mtls']]

The default callback auth kind to use for async endpoints. Either \"basic\" or \"mtls\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_username Optional[str]

The default callback auth username to use. This only applies if default_callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_password Optional[str]

The default callback auth password to use. This only applies if default_callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_cert Optional[str]

The default callback auth cert to use. This only applies if default_callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None default_callback_auth_key Optional[str]

The default callback auth key to use. This only applies if default_callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None public_inference Optional[bool]

If True, this endpoint will be available to all user IDs for inference.

None"},{"location":"api/client/#launch.client.LaunchClient.get_batch_async_response","title":"get_batch_async_response","text":"
get_batch_async_response(batch_job_id: str) -> Dict[str, Any]\n

Gets inference results from a previously created batch job.

Parameters:

Name Type Description Default batch_job_id str

An id representing the batch task job. This id is in the response from calling batch_async_request.

required

Returns:

Type Description Dict[str, Any]

A dictionary that contains the following fields:

Dict[str, Any]
  • status: The status of the job.
Dict[str, Any]
  • result: The url where the result is stored.
Dict[str, Any]
  • duration: A string representation of how long the job took to finish or how long it has been running, for a job current in progress.
Dict[str, Any]
  • num_tasks_pending: The number of tasks that are still pending.
Dict[str, Any]
  • num_tasks_completed: The number of tasks that have completed.
"},{"location":"api/client/#launch.client.LaunchClient.get_docker_image_batch_job","title":"get_docker_image_batch_job","text":"
get_docker_image_batch_job(batch_job_id: str)\n

For self hosted mode only. Gets information about a batch job given a batch job id.

"},{"location":"api/client/#launch.client.LaunchClient.get_docker_image_batch_job_bundle","title":"get_docker_image_batch_job_bundle","text":"
get_docker_image_batch_job_bundle(docker_image_batch_job_bundle_id: str) -> DockerImageBatchJobBundleResponse\n

For self hosted mode only. Gets information for a single batch job bundle with a given id.

"},{"location":"api/client/#launch.client.LaunchClient.get_file","title":"get_file","text":"
get_file(file_id: str) -> GetFileResponse\n

Get metadata about a file

Parameters:

Name Type Description Default file_id str

ID of the file

required

Returns:

Name Type Description GetFileResponse GetFileResponse

ID, filename, and size of the requested file

"},{"location":"api/client/#launch.client.LaunchClient.get_file_content","title":"get_file_content","text":"
get_file_content(file_id: str) -> GetFileContentResponse\n

Get a file's content

Parameters:

Name Type Description Default file_id str

ID of the file

required

Returns:

Name Type Description GetFileContentResponse GetFileContentResponse

ID and content of the requested file

"},{"location":"api/client/#launch.client.LaunchClient.get_fine_tune","title":"get_fine_tune","text":"
get_fine_tune(fine_tune_id: str) -> GetFineTuneResponse\n

Get status of a fine-tune

Parameters:

Name Type Description Default fine_tune_id str

ID of the fine-tune

required

Returns:

Name Type Description GetFineTuneResponse GetFineTuneResponse

ID and status of the requested fine-tune

"},{"location":"api/client/#launch.client.LaunchClient.get_fine_tune_events","title":"get_fine_tune_events","text":"
get_fine_tune_events(fine_tune_id: str) -> GetFineTuneEventsResponse\n

Get list of fine-tune events

Parameters:

Name Type Description Default fine_tune_id str

ID of the fine-tune

required

Returns:

Name Type Description GetFineTuneEventsResponse GetFineTuneEventsResponse

a list of all the events of the fine-tune

"},{"location":"api/client/#launch.client.LaunchClient.get_latest_docker_image_batch_job_bundle","title":"get_latest_docker_image_batch_job_bundle","text":"
get_latest_docker_image_batch_job_bundle(bundle_name: str) -> DockerImageBatchJobBundleResponse\n

For self hosted mode only. Gets information for the latest batch job bundle with a given name.

"},{"location":"api/client/#launch.client.LaunchClient.get_latest_model_bundle_v2","title":"get_latest_model_bundle_v2","text":"
get_latest_model_bundle_v2(model_bundle_name: str) -> ModelBundleV2Response\n

Get the latest version of a model bundle.

Parameters:

Name Type Description Default model_bundle_name str

The name of the model bundle you want to get.

required

Returns:

Type Description ModelBundleV2Response

An object containing the following keys:

  • id: The ID of the model bundle.
  • name: The name of the model bundle.
  • schema_location: The location of the schema for the model bundle.
  • flavor: The flavor of the model bundle. Either RunnableImage, CloudpickleArtifact, ZipArtifact, or TritonEnhancedRunnableImageFlavor.
  • created_at: The time the model bundle was created.
  • metadata: A dictionary of metadata associated with the model bundle.
  • model_artifact_ids: A list of IDs of model artifacts associated with the bundle.
"},{"location":"api/client/#launch.client.LaunchClient.get_llm_model_endpoint","title":"get_llm_model_endpoint","text":"
get_llm_model_endpoint(endpoint_name: str) -> Optional[Union[AsyncEndpoint, SyncEndpoint, StreamingEndpoint]]\n

Gets a model endpoint associated with a name that the user has access to.

Parameters:

Name Type Description Default endpoint_name str

The name of the endpoint to retrieve.

required"},{"location":"api/client/#launch.client.LaunchClient.get_model_bundle","title":"get_model_bundle","text":"
get_model_bundle(model_bundle: Union[ModelBundle, str]) -> ModelBundle\n

Returns a model bundle specified by bundle_name that the user owns.

Parameters:

Name Type Description Default model_bundle Union[ModelBundle, str]

The bundle or its name.

required

Returns:

Type Description ModelBundle

A ModelBundle object

"},{"location":"api/client/#launch.client.LaunchClient.get_model_bundle_v2","title":"get_model_bundle_v2","text":"
get_model_bundle_v2(model_bundle_id: str) -> ModelBundleV2Response\n

Get a model bundle.

Parameters:

Name Type Description Default model_bundle_id str

The ID of the model bundle you want to get.

required

Returns:

Type Description ModelBundleV2Response

An object containing the following fields:

  • id: The ID of the model bundle.
  • name: The name of the model bundle.
  • flavor: The flavor of the model bundle. Either RunnableImage, CloudpickleArtifact, ZipArtifact, or TritonEnhancedRunnableImageFlavor.
  • created_at: The time the model bundle was created.
  • metadata: A dictionary of metadata associated with the model bundle.
  • model_artifact_ids: A list of IDs of model artifacts associated with the bundle.
"},{"location":"api/client/#launch.client.LaunchClient.get_model_endpoint","title":"get_model_endpoint","text":"
get_model_endpoint(endpoint_name: str) -> Optional[Union[AsyncEndpoint, SyncEndpoint]]\n

Gets a model endpoint associated with a name.

Parameters:

Name Type Description Default endpoint_name str

The name of the endpoint to retrieve.

required"},{"location":"api/client/#launch.client.LaunchClient.list_docker_image_batch_job_bundles","title":"list_docker_image_batch_job_bundles","text":"
list_docker_image_batch_job_bundles(bundle_name: Optional[str] = None, order_by: Optional[Literal['newest', 'oldest']] = None) -> ListDockerImageBatchJobBundleResponse\n

For self hosted mode only. Gets information for multiple bundles.

Parameters:

Name Type Description Default bundle_name Optional[str]

The name of the bundles to retrieve. If not specified, this will retrieve all bundles.

None order_by Optional[Literal['newest', 'oldest']]

Either \"newest\", \"oldest\", or not specified. Specify to sort by newest/oldest.

None"},{"location":"api/client/#launch.client.LaunchClient.list_files","title":"list_files","text":"
list_files() -> ListFilesResponse\n

List files

Returns:

Name Type Description ListFilesResponse ListFilesResponse

list of all files (ID, filename, and size)

"},{"location":"api/client/#launch.client.LaunchClient.list_fine_tunes","title":"list_fine_tunes","text":"
list_fine_tunes() -> ListFineTunesResponse\n

List fine-tunes

Returns:

Name Type Description ListFineTunesResponse ListFineTunesResponse

list of all fine-tunes and their statuses

"},{"location":"api/client/#launch.client.LaunchClient.list_llm_model_endpoints","title":"list_llm_model_endpoints","text":"
list_llm_model_endpoints() -> List[Endpoint]\n

Lists all LLM model endpoints that the user has access to.

Returns:

Type Description List[Endpoint]

A list of ModelEndpoint objects.

"},{"location":"api/client/#launch.client.LaunchClient.list_model_bundles","title":"list_model_bundles","text":"
list_model_bundles() -> List[ModelBundle]\n

Returns a list of model bundles that the user owns.

Returns:

Type Description List[ModelBundle]

A list of ModelBundle objects

"},{"location":"api/client/#launch.client.LaunchClient.list_model_bundles_v2","title":"list_model_bundles_v2","text":"
list_model_bundles_v2() -> ListModelBundlesV2Response\n

List all model bundles.

Returns:

Type Description ListModelBundlesV2Response

An object containing the following keys:

  • model_bundles: A list of model bundles. Each model bundle is an object.
"},{"location":"api/client/#launch.client.LaunchClient.list_model_endpoints","title":"list_model_endpoints","text":"
list_model_endpoints() -> List[Endpoint]\n

Lists all model endpoints that the user owns.

Returns:

Type Description List[Endpoint]

A list of ModelEndpoint objects.

"},{"location":"api/client/#launch.client.LaunchClient.model_download","title":"model_download","text":"
model_download(model_name: str, download_format: str = 'hugging_face') -> ModelDownloadResponse\n

download a finetuned model

Parameters:

Name Type Description Default model_name str

name of the model to download

required download_format str

format of the model to download

'hugging_face'

Returns:

Name Type Description ModelDownloadResponse ModelDownloadResponse

dictionary with file names and urls to download the model

"},{"location":"api/client/#launch.client.LaunchClient.read_endpoint_creation_logs","title":"read_endpoint_creation_logs","text":"
read_endpoint_creation_logs(model_endpoint: Union[ModelEndpoint, str])\n

Retrieves the logs for the creation of the endpoint.

Parameters:

Name Type Description Default model_endpoint Union[ModelEndpoint, str]

The endpoint or its name.

required"},{"location":"api/client/#launch.client.LaunchClient.register_batch_csv_location_fn","title":"register_batch_csv_location_fn","text":"
register_batch_csv_location_fn(batch_csv_location_fn: Callable[[], str])\n

For self-hosted mode only. Registers a function that gives a location for batch CSV inputs. Should give different locations each time. This function is called as batch_csv_location_fn(), and should return a batch_csv_url that upload_batch_csv_fn can take.

Strictly, batch_csv_location_fn() does not need to return a str. The only requirement is that if batch_csv_location_fn returns a value of type T, then upload_batch_csv_fn() takes in an object of type T as its second argument (i.e. batch_csv_url).

Parameters:

Name Type Description Default batch_csv_location_fn Callable[[], str]

Function that generates batch_csv_urls for upload_batch_csv_fn.

required"},{"location":"api/client/#launch.client.LaunchClient.register_bundle_location_fn","title":"register_bundle_location_fn","text":"
register_bundle_location_fn(bundle_location_fn: Callable[[], str])\n

For self-hosted mode only. Registers a function that gives a location for a model bundle. Should give different locations each time. This function is called as bundle_location_fn(), and should return a bundle_url that register_upload_bundle_fn can take.

Strictly, bundle_location_fn() does not need to return a str. The only requirement is that if bundle_location_fn returns a value of type T, then upload_bundle_fn() takes in an object of type T as its second argument (i.e. bundle_url).

Parameters:

Name Type Description Default bundle_location_fn Callable[[], str]

Function that generates bundle_urls for upload_bundle_fn.

required"},{"location":"api/client/#launch.client.LaunchClient.register_upload_batch_csv_fn","title":"register_upload_batch_csv_fn","text":"
register_upload_batch_csv_fn(upload_batch_csv_fn: Callable[[str, str], None])\n

For self-hosted mode only. Registers a function that handles batch text upload. This function is called as

upload_batch_csv_fn(csv_text, csv_url)\n

This function should directly write the contents of csv_text as a text string into csv_url.

Parameters:

Name Type Description Default upload_batch_csv_fn Callable[[str, str], None]

Function that takes in a csv text (string type), and uploads that bundle to an appropriate location. Only needed for self-hosted mode.

required"},{"location":"api/client/#launch.client.LaunchClient.register_upload_bundle_fn","title":"register_upload_bundle_fn","text":"
register_upload_bundle_fn(upload_bundle_fn: Callable[[str, str], None])\n

For self-hosted mode only. Registers a function that handles model bundle upload. This function is called as

upload_bundle_fn(serialized_bundle, bundle_url)\n

This function should directly write the contents of serialized_bundle as a binary string into bundle_url.

See register_bundle_location_fn for more notes on the signature of upload_bundle_fn

Parameters:

Name Type Description Default upload_bundle_fn Callable[[str, str], None]

Function that takes in a serialized bundle (bytes type), and uploads that bundle to an appropriate location. Only needed for self-hosted mode.

required"},{"location":"api/client/#launch.client.LaunchClient.update_docker_image_batch_job","title":"update_docker_image_batch_job","text":"
update_docker_image_batch_job(batch_job_id: str, cancel: bool)\n

For self hosted mode only. Updates a batch job by id. Use this if you want to cancel/delete a batch job.

"},{"location":"api/client/#launch.client.LaunchClient.upload_file","title":"upload_file","text":"
upload_file(file_path: str) -> UploadFileResponse\n

Upload a file

Parameters:

Name Type Description Default file_path str

Path to a local file to upload.

required

Returns:

Name Type Description UploadFileResponse UploadFileResponse

ID of the created file

"},{"location":"api/endpoint_predictions/","title":"Endpoint Predictions","text":""},{"location":"api/endpoint_predictions/#launch.model_endpoint.EndpointRequest","title":"EndpointRequest","text":"
EndpointRequest(url: Optional[str] = None, args: Optional[Dict] = None, callback_url: Optional[str] = None, callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, callback_auth_username: Optional[str] = None, callback_auth_password: Optional[str] = None, callback_auth_cert: Optional[str] = None, callback_auth_key: Optional[str] = None, return_pickled: Optional[bool] = False, request_id: Optional[str] = None)\n

Represents a single request to either a SyncEndpoint, StreamingEndpoint, or AsyncEndpoint.

Parameters:

Name Type Description Default url Optional[str]

A url to some file that can be read in to a ModelBundle's predict function. Can be an image, raw text, etc. Note: the contents of the file located at url are opened as a sequence of bytes and passed to the predict function. If you instead want to pass the url itself as an input to the predict function, see args.

Exactly one of url and args must be specified.

None args Optional[Dict]

A Dictionary with arguments to a ModelBundle's predict function. If the predict function has signature predict_fn(foo, bar), then the keys in the dictionary should be \"foo\" and \"bar\". Values must be native Python objects.

Exactly one of url and args must be specified.

None return_pickled Optional[bool]

Whether the output should be a pickled python object, or directly returned serialized json.

False callback_url Optional[str]

The callback url to use for this task. If None, then the default_callback_url of the endpoint is used. The endpoint must specify \"callback\" as a post-inference hook for the callback to be triggered.

None callback_auth_kind Optional[Literal['basic', 'mtls']]

The default callback auth kind to use for async endpoints. Either \"basic\" or \"mtls\". This can be overridden in the task parameters for each individual task.

None callback_auth_username Optional[str]

The default callback auth username to use. This only applies if callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None callback_auth_password Optional[str]

The default callback auth password to use. This only applies if callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None callback_auth_cert Optional[str]

The default callback auth cert to use. This only applies if callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None callback_auth_key Optional[str]

The default callback auth key to use. This only applies if callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None request_id Optional[str]

(deprecated) A user-specifiable id for requests. Should be unique among EndpointRequests made in the same batch call. If one isn't provided the client will generate its own.

None"},{"location":"api/endpoint_predictions/#launch.model_endpoint.EndpointResponse","title":"EndpointResponse","text":"
EndpointResponse(client, status: str, result_url: Optional[str] = None, result: Optional[str] = None, traceback: Optional[str] = None)\n

Represents a response received from a Endpoint.

Parameters:

Name Type Description Default client

An instance of LaunchClient.

required status str

A string representing the status of the request, i.e. SUCCESS, FAILURE, or PENDING

required result_url Optional[str]

A string that is a url containing the pickled python object from the Endpoint's predict function.

Exactly one of result_url or result will be populated, depending on the value of return_pickled in the request.

None result Optional[str]

A string that is the serialized return value (in json form) of the Endpoint's predict function. Specifically, one can json.loads() the value of result to get the original python object back.

Exactly one of result_url or result will be populated, depending on the value of return_pickled in the request.

None traceback Optional[str]

The stack trace if the inference endpoint raised an error. Can be used for debugging

None"},{"location":"api/endpoint_predictions/#launch.model_endpoint.EndpointResponseFuture","title":"EndpointResponseFuture","text":"
EndpointResponseFuture(client, endpoint_name: str, async_task_id: str)\n

Represents a future response from an Endpoint. Specifically, when the EndpointResponseFuture is ready, then its get method will return an actual instance of EndpointResponse.

This object should not be directly instantiated by the user.

Parameters:

Name Type Description Default client

An instance of LaunchClient.

required endpoint_name str

The name of the endpoint.

required async_task_id str

An async task id.

required"},{"location":"api/endpoint_predictions/#launch.model_endpoint.EndpointResponseFuture.get","title":"get","text":"
get(timeout: Optional[float] = None) -> EndpointResponse\n

Retrieves the EndpointResponse for the prediction request after it completes. This method blocks.

Parameters:

Name Type Description Default timeout Optional[float]

The maximum number of seconds to wait for the response. If None, then the method will block indefinitely until the response is ready.

None"},{"location":"api/endpoint_predictions/#launch.model_endpoint.EndpointResponseStream","title":"EndpointResponseStream","text":"
EndpointResponseStream(response)\n

Bases: Iterator

Represents a stream response from an Endpoint. This object is iterable and yields EndpointResponse objects.

This object should not be directly instantiated by the user.

"},{"location":"api/endpoint_predictions/#launch.model_endpoint.EndpointResponseStream.__iter__","title":"__iter__","text":"
__iter__()\n

Uses server-sent events to iterate through the stream.

"},{"location":"api/endpoint_predictions/#launch.model_endpoint.EndpointResponseStream.__next__","title":"__next__","text":"
__next__()\n

Uses server-sent events to iterate through the stream.

"},{"location":"api/hooks/","title":"Hooks","text":""},{"location":"api/hooks/#launch.hooks.PostInferenceHooks","title":"PostInferenceHooks","text":"

Bases: str, Enum

Post-inference hooks are functions that are called after inference is complete.

Attributes:

Name Type Description CALLBACK str

The callback hook is called with the inference response and the task ID.

"},{"location":"api/llms/","title":"LLM APIs","text":"

We provide some APIs to conveniently create, list and inference with LLMs. Under the hood they are Launch model endpoints.

"},{"location":"api/llms/#example","title":"Example","text":"LLM APIs Usage
import os\nfrom rich import print\nfrom launch import LaunchClient\nfrom launch.api_client.model.llm_inference_framework import (\nLLMInferenceFramework,\n)\nfrom launch.api_client.model.llm_source import LLMSource\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"), endpoint=os.getenv(\"LAUNCH_ENDPOINT\"))\nendpoints = client.list_llm_model_endpoints()\nprint(endpoints)\nendpoint_name = \"test-flan-t5-xxl\"\nclient.create_llm_model_endpoint(\nendpoint_name=endpoint_name,\nmodel_name=\"flan-t5-xxl\",\nsource=LLMSource.HUGGING_FACE,\ninference_framework=LLMInferenceFramework.DEEPSPEED,\ninference_framework_image_tag=os.getenv(\"INFERENCE_FRAMEWORK_IMAGE_TAG\"),\nnum_shards=4,\nmin_workers=1,\nmax_workers=1,\ngpus=4,\nendpoint_type=\"sync\",\n)\n# Wait for the endpoint to be ready\noutput = client.completions_sync(endpoint_name, prompt=\"What is Deep Learning?\", max_new_tokens=10, temperature=0)\nprint(output)\n
"},{"location":"api/model_bundles/","title":"Model Bundles","text":""},{"location":"api/model_bundles/#launch.model_bundle.CloudpickleArtifactFlavor","title":"CloudpickleArtifactFlavor","text":"

Bases: BaseModel

"},{"location":"api/model_bundles/#launch.model_bundle.CloudpickleArtifactFlavor.app_config","title":"app_config instance-attribute","text":"
app_config: Optional[Dict[str, Any]]\n

Optional configuration for the application.

"},{"location":"api/model_bundles/#launch.model_bundle.CloudpickleArtifactFlavor.framework","title":"framework class-attribute instance-attribute","text":"
framework: Union[PytorchFramework, TensorflowFramework, CustomFramework] = Field(..., discriminator='framework_type')\n

Machine Learning framework specification. Either PytorchFramework, TensorflowFramework, or CustomFramework.

"},{"location":"api/model_bundles/#launch.model_bundle.CloudpickleArtifactFlavor.load_model_fn","title":"load_model_fn instance-attribute","text":"
load_model_fn: str\n

Function which, when called, returns the model object.

"},{"location":"api/model_bundles/#launch.model_bundle.CloudpickleArtifactFlavor.load_predict_fn","title":"load_predict_fn instance-attribute","text":"
load_predict_fn: str\n

Function which, when called, returns the prediction function.

"},{"location":"api/model_bundles/#launch.model_bundle.CloudpickleArtifactFlavor.requirements","title":"requirements instance-attribute","text":"
requirements: List[str]\n

List of requirements to install in the environment before running the model.

"},{"location":"api/model_bundles/#launch.model_bundle.CreateModelBundleV2Response","title":"CreateModelBundleV2Response","text":"

Bases: BaseModel

Response object for creating a Model Bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.CreateModelBundleV2Response.model_bundle_id","title":"model_bundle_id instance-attribute","text":"
model_bundle_id: str\n

ID of the Model Bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.CustomFramework","title":"CustomFramework","text":"

Bases: BaseModel

"},{"location":"api/model_bundles/#launch.model_bundle.CustomFramework.image_repository","title":"image_repository instance-attribute","text":"
image_repository: str\n

Docker image repository to use as the base image.

"},{"location":"api/model_bundles/#launch.model_bundle.CustomFramework.image_tag","title":"image_tag instance-attribute","text":"
image_tag: str\n

Docker image tag to use as the base image.

"},{"location":"api/model_bundles/#launch.model_bundle.ListModelBundlesV2Response","title":"ListModelBundlesV2Response","text":"

Bases: BaseModel

Response object for listing Model Bundles.

"},{"location":"api/model_bundles/#launch.model_bundle.ListModelBundlesV2Response.model_bundles","title":"model_bundles instance-attribute","text":"
model_bundles: List[ModelBundleV2Response]\n

A list of Model Bundles.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundle","title":"ModelBundle dataclass","text":"

Represents a ModelBundle.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundle.app_config","title":"app_config class-attribute instance-attribute","text":"
app_config: Optional[Dict[Any, Any]] = None\n

An optional user-specified configuration mapping for the bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundle.env_params","title":"env_params class-attribute instance-attribute","text":"
env_params: Optional[Dict[str, str]] = None\n

A dictionary that dictates environment information. See LaunchClient.create_model_bundle for more information.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundle.id","title":"id class-attribute instance-attribute","text":"
id: Optional[str] = None\n

A globally unique identifier for the bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundle.location","title":"location class-attribute instance-attribute","text":"
location: Optional[str] = None\n

An opaque location for the bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundle.metadata","title":"metadata class-attribute instance-attribute","text":"
metadata: Optional[Dict[Any, Any]] = None\n

Arbitrary metadata for the bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundle.name","title":"name instance-attribute","text":"
name: str\n

The name of the bundle. Must be unique across all bundles that the user owns.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundle.packaging_type","title":"packaging_type class-attribute instance-attribute","text":"
packaging_type: Optional[str] = None\n

The packaging type for the bundle. Can be cloudpickle or zip.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundle.requirements","title":"requirements class-attribute instance-attribute","text":"
requirements: Optional[List[str]] = None\n

A list of Python package requirements for the bundle. See LaunchClient.create_model_bundle for more information.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundleV2Response","title":"ModelBundleV2Response","text":"

Bases: BaseModel

Response object for a single Model Bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundleV2Response.created_at","title":"created_at instance-attribute","text":"
created_at: datetime.datetime\n

Timestamp of when the Model Bundle was created.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundleV2Response.flavor","title":"flavor class-attribute instance-attribute","text":"
flavor: ModelBundleFlavors = Field(..., discriminator='flavor')\n

Flavor of the Model Bundle, representing how the model bundle was packaged.

See ModelBundleFlavors for details.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundleV2Response.id","title":"id instance-attribute","text":"
id: str\n

ID of the Model Bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundleV2Response.metadata","title":"metadata instance-attribute","text":"
metadata: Dict[str, Any]\n

Metadata associated with the Model Bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundleV2Response.model_artifact_ids","title":"model_artifact_ids instance-attribute","text":"
model_artifact_ids: List[str]\n

IDs of the Model Artifacts associated with the Model Bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.ModelBundleV2Response.name","title":"name instance-attribute","text":"
name: str\n

Name of the Model Bundle.

"},{"location":"api/model_bundles/#launch.model_bundle.PytorchFramework","title":"PytorchFramework","text":"

Bases: BaseModel

"},{"location":"api/model_bundles/#launch.model_bundle.PytorchFramework.pytorch_image_tag","title":"pytorch_image_tag instance-attribute","text":"
pytorch_image_tag: str\n

Image tag of the Pytorch image to use.

"},{"location":"api/model_bundles/#launch.model_bundle.RunnableImageFlavor","title":"RunnableImageFlavor","text":"

Bases: RunnableImageLike

Model bundles that use custom docker images that expose an HTTP server for inference.

"},{"location":"api/model_bundles/#launch.model_bundle.TensorflowFramework","title":"TensorflowFramework","text":"

Bases: BaseModel

"},{"location":"api/model_bundles/#launch.model_bundle.TensorflowFramework.tensorflow_version","title":"tensorflow_version instance-attribute","text":"
tensorflow_version: str\n

Tensorflow version to use.

"},{"location":"api/model_bundles/#launch.model_bundle.ZipArtifactFlavor","title":"ZipArtifactFlavor","text":"

Bases: BaseModel

"},{"location":"api/model_bundles/#launch.model_bundle.ZipArtifactFlavor.app_config","title":"app_config class-attribute instance-attribute","text":"
app_config: Optional[Dict[str, Any]] = None\n

Optional configuration for the application.

"},{"location":"api/model_bundles/#launch.model_bundle.ZipArtifactFlavor.framework","title":"framework class-attribute instance-attribute","text":"
framework: Union[PytorchFramework, TensorflowFramework, CustomFramework] = Field(..., discriminator='framework_type')\n

Machine Learning framework specification. Either PytorchFramework, TensorflowFramework, or CustomFramework.

"},{"location":"api/model_bundles/#launch.model_bundle.ZipArtifactFlavor.load_model_fn_module_path","title":"load_model_fn_module_path instance-attribute","text":"
load_model_fn_module_path: str\n

Path to the module to load the model object.

"},{"location":"api/model_bundles/#launch.model_bundle.ZipArtifactFlavor.load_predict_fn_module_path","title":"load_predict_fn_module_path instance-attribute","text":"
load_predict_fn_module_path: str\n

Path to the module to load the prediction function.

"},{"location":"api/model_bundles/#launch.model_bundle.ZipArtifactFlavor.requirements","title":"requirements instance-attribute","text":"
requirements: List[str]\n

List of requirements to install in the environment before running the model.

"},{"location":"api/model_endpoints/","title":"Model Endpoints","text":"

All classes here are returned by the get_model_endpoint method and provide a predict function.

"},{"location":"api/model_endpoints/#launch.model_endpoint.AsyncEndpoint","title":"AsyncEndpoint","text":"
AsyncEndpoint(model_endpoint: ModelEndpoint, client: ModelEndpoint)\n

Bases: Endpoint

An asynchronous model endpoint.

Parameters:

Name Type Description Default model_endpoint ModelEndpoint

ModelEndpoint object.

required client

A LaunchClient object

required"},{"location":"api/model_endpoints/#launch.model_endpoint.AsyncEndpoint.predict","title":"predict","text":"
predict(request: EndpointRequest) -> EndpointResponseFuture\n

Runs an asynchronous prediction request.

Parameters:

Name Type Description Default request EndpointRequest

The EndpointRequest object that contains the payload.

required

Returns:

Name Type Description EndpointResponseFuture

An EndpointResponseFuture that the user can use to query the status of the request.

Example EndpointResponseFuture EndpointResponseFuture

.. code-block:: python

my_endpoint = AsyncEndpoint(...) f: EndpointResponseFuture = my_endpoint.predict(EndpointRequest(...)) result = f.get() # blocks on completion

"},{"location":"api/model_endpoints/#launch.model_endpoint.AsyncEndpoint.predict_batch","title":"predict_batch","text":"
predict_batch(requests: Sequence[EndpointRequest]) -> AsyncEndpointBatchResponse\n

(deprecated) Runs inference on the data items specified by urls. Returns an AsyncEndpointResponse.

Parameters:

Name Type Description Default requests Sequence[EndpointRequest]

List of EndpointRequests. Request_ids must all be distinct.

required

Returns:

Type Description AsyncEndpointBatchResponse

an AsyncEndpointResponse keeping track of the inference requests made

"},{"location":"api/model_endpoints/#launch.model_endpoint.SyncEndpoint","title":"SyncEndpoint","text":"
SyncEndpoint(model_endpoint: ModelEndpoint, client: ModelEndpoint)\n

Bases: Endpoint

A synchronous model endpoint.

Parameters:

Name Type Description Default model_endpoint ModelEndpoint

ModelEndpoint object.

required client

A LaunchClient object

required"},{"location":"api/model_endpoints/#launch.model_endpoint.SyncEndpoint.predict","title":"predict","text":"
predict(request: EndpointRequest) -> EndpointResponse\n

Runs a synchronous prediction request.

Parameters:

Name Type Description Default request EndpointRequest

The EndpointRequest object that contains the payload.

required"},{"location":"api/model_endpoints/#launch.model_endpoint.StreamingEndpoint","title":"StreamingEndpoint","text":"
StreamingEndpoint(model_endpoint: ModelEndpoint, client: ModelEndpoint)\n

Bases: Endpoint

A streaming model endpoint.

Parameters:

Name Type Description Default model_endpoint ModelEndpoint

ModelEndpoint object.

required client

A LaunchClient object

required"},{"location":"api/model_endpoints/#launch.model_endpoint.StreamingEndpoint.predict","title":"predict","text":"
predict(request: EndpointRequest) -> EndpointResponseStream\n

Runs a streaming prediction request.

Parameters:

Name Type Description Default request EndpointRequest

The EndpointRequest object that contains the payload.

required

Returns:

Type Description EndpointResponseStream

An EndpointResponseStream object that can be used to iterate through the stream.

"},{"location":"concepts/batch_jobs/","title":"Batch Jobs","text":"

For predicting over a larger set of tasks (> 50) at once, it is recommended to use batch jobs. Batch jobs are a way to send a large number of tasks to a model bundle. The tasks are processed in parallel, and the results are returned as a list of predictions.

Batch jobs are created using the batch_async_request method of the LaunchClient.

Creating and Following a Batch Job
import logging\nimport os\nimport time\nfrom launch import LaunchClient\nlogger = logging.getLogger(__name__)\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nbatch_job = client.batch_async_request(\nmodel_bundle=\"test-bundle\",\ninputs=[\n{\"x\": 2, \"y\": \"hello\"},\n{\"x\": 3, \"y\": \"world\"},\n],\ngpus=0,\nlabels={\n\"team\": \"MY_TEAM\",\n\"product\": \"MY_PRODUCT\",\n}\n)\nstatus = \"PENDING\"\nres = None\nwhile status != \"SUCCESS\" and status != \"FAILURE\" and status != \"CANCELLED\":\ntime.sleep(30)\nres = client.get_batch_async_response(batch_job[\"job_id\"])\nstatus = res[\"status\"]\nlogging.info(f\"the batch job is {status}\")\nlogging.info(res)\n
"},{"location":"concepts/callbacks/","title":"Callbacks","text":"

Async model endpoints can be configured to send callbacks to a user-defined callback URL. Callbacks are sent as HTTP POST requests with a JSON body. The following code snippet shows how to create an async model endpoint with a callback URL.

To configure an async endpoint to send callbacks, set the post_inference_hooks field to include launch.PostInferenceHooks.CALLBACK. A callback URL also needs to be specified, and it can be configured as a default using the default_callback_url argument to launch.LaunchClient.create_model_endpoint or as a per-task override using the callback_url field of launch.EndpointRequest.

Note

Callbacks will not be sent if the endpoint does not have any post-inference hooks specified, even if a default_callback_url is provided to the endpoint creation method or if the prediction request has a callback_url override.

Creating an Async Model Endpoint with a Callback URL
import os\nimport time\nfrom launch import EndpointRequest, LaunchClient, PostInferenceHooks\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoint = client.create_model_endpoint(\nendpoint_name=\"demo-endpoint-callback\",\nmodel_bundle=\"test-bundle\",\ncpus=1,\nmin_workers=1,\nendpoint_type=\"async\",\nupdate_if_exists=True,\nlabels={\n\"team\": \"MY_TEAM\",\n\"product\": \"MY_PRODUCT\",\n},\npost_inference_hooks=[PostInferenceHooks.CALLBACK],\ndefault_callback_url=\"https://example.com\",\n)\nwhile endpoint.status() != \"READY\":\ntime.sleep(10)\nfuture_default = endpoint.predict(\nrequest=EndpointRequest(args={\"x\": 2, \"y\": \"hello\"})\n)\n\"\"\"\nA callback is sent to https://example.com with the following JSON body:\n{\n    \"task_id\": \"THE_TASK_ID\",\n    \"result\": 7\n}\n\"\"\"\nfuture_custom_callback_url = endpoint.predict(\nrequest=EndpointRequest(\nargs={\"x\": 3, \"y\": \"hello\"}, callback_url=\"https://example.com/custom\"\n),\n)\n\"\"\"\nA callback is sent to https://example.com/custom with the following JSON body:\n{\n    \"task_id\": \"THE_TASK_ID\",\n    \"result\": 8\n}\n\"\"\"\n
"},{"location":"concepts/callbacks/#authentication-for-callbacks","title":"Authentication for callbacks","text":"

Warning

This feature is currently in beta, and the API is likely to change.

Callbacks can be authenticated using shared authentication headers. To enable authentication, set either default_callback_auth_kind when creating the endpoint or callback_auth_kind when making a prediction request.

Currently, the supported authentication methods are basic and mtls. If basic is used, then the default_callback_auth_username and default_callback_auth_password fields must be specified when creating the endpoint, or the callback_auth_username and callback_auth_password fields must be specified when making a prediction request. If mtls is used, then the same is true for the default_callback_auth_cert and default_callback_auth_key fields, or the callback_auth_cert and callback_auth_key fields.

Creating an Async Model Endpoint with custom Callback auth
import os\nimport time\nfrom launch import EndpointRequest, LaunchClient, PostInferenceHooks\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoint = client.create_model_endpoint(\nendpoint_name=\"demo-endpoint-callback\",\nmodel_bundle=\"test-bundle\",\ncpus=1,\nmin_workers=1,\nendpoint_type=\"async\",\nupdate_if_exists=True,\nlabels={\n\"team\": \"MY_TEAM\",\n\"product\": \"MY_PRODUCT\",\n},\npost_inference_hooks=[PostInferenceHooks.CALLBACK],\ndefault_callback_url=\"https://example.com\",\ndefault_callback_auth_kind=\"basic\",\ndefault_callback_auth_username=\"user\",\ndefault_callback_auth_password=\"password\",\n)\nwhile endpoint.status() != \"READY\":\ntime.sleep(10)\nfuture_default = endpoint.predict(\nrequest=EndpointRequest(args={\"x\": 2, \"y\": \"hello\"})\n)\n\"\"\"\nA callback is sent to https://example.com with (\"user\", \"password\") as the basic auth.\n\"\"\"\nfuture_custom_callback_auth = endpoint.predict(\nrequest=EndpointRequest(\nargs={\"x\": 3, \"y\": \"hello\"},\ncallback_auth_kind=\"mtls\", \ncallback_auth_cert=\"cert\", \ncallback_auth_key=\"key\",\n),\n)\n\"\"\"\nA callback is sent with mTLS authentication.\n\"\"\"\nclient.edit_model_endpoint(\nmodel_endpoint=endpoint.model_endpoint,\ndefault_callback_auth_kind=\"mtls\",\ndefault_callback_auth_cert=\"cert\",\ndefault_callback_auth_key=\"key\",\n)\nwhile endpoint.status() != \"READY\":\ntime.sleep(10)\nfuture_default = endpoint.predict(\nrequest=EndpointRequest(args={\"x\": 2, \"y\": \"hello\"})\n)\n\"\"\"\nA callback is sent with mTLS auth.\n\"\"\"\nfuture_custom_callback_auth = endpoint.predict(\nrequest=EndpointRequest(\nargs={\"x\": 3, \"y\": \"hello\"},\ncallback_auth_kind=\"basic\",\ncallback_auth_username=\"user\",\ncallback_auth_password=\"pass\",\n),\n)\n\"\"\"\nA callback is sent with (\"user\", \"pass\") as the basic auth.\n\"\"\"\n
"},{"location":"concepts/endpoint_predictions/","title":"Endpoint Predictions","text":"

Once endpoints have been created, users can send tasks to them to make predictions. The following code snippet shows how to send tasks to endpoints.

Sending a Task to an Async EndpointSending a Task to a Sync EndpointSending a Task to a Streaming Endpoint
import os\nfrom launch import EndpointRequest, LaunchClient\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoint = client.get_model_endpoint(\"demo-endpoint-async\")\nfuture = endpoint.predict(request=EndpointRequest(args={\"x\": 2, \"y\": \"hello\"}))\nresponse = future.get()\nprint(response)\n
import os\nfrom launch import EndpointRequest, LaunchClient\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoint = client.get_model_endpoint(\"demo-endpoint-sync\")\nresponse = endpoint.predict(request=EndpointRequest(args={\"x\": 2, \"y\": \"hello\"}))\nprint(response)\n
import os\nfrom launch import EndpointRequest, LaunchClient\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoint = client.get_model_endpoint(\"demo-endpoint-streaming\")\nresponse = endpoint.predict(request=EndpointRequest(args={\"x\": 2, \"y\": \"hello\"}))\nfor chunk in response:\nprint(chunk)\n
"},{"location":"concepts/endpoint_predictions/#launch.model_endpoint.EndpointRequest","title":"EndpointRequest","text":"
EndpointRequest(url: Optional[str] = None, args: Optional[Dict] = None, callback_url: Optional[str] = None, callback_auth_kind: Optional[Literal['basic', 'mtls']] = None, callback_auth_username: Optional[str] = None, callback_auth_password: Optional[str] = None, callback_auth_cert: Optional[str] = None, callback_auth_key: Optional[str] = None, return_pickled: Optional[bool] = False, request_id: Optional[str] = None)\n

Represents a single request to either a SyncEndpoint, StreamingEndpoint, or AsyncEndpoint.

Parameters:

Name Type Description Default url Optional[str]

A url to some file that can be read in to a ModelBundle's predict function. Can be an image, raw text, etc. Note: the contents of the file located at url are opened as a sequence of bytes and passed to the predict function. If you instead want to pass the url itself as an input to the predict function, see args.

Exactly one of url and args must be specified.

None args Optional[Dict]

A Dictionary with arguments to a ModelBundle's predict function. If the predict function has signature predict_fn(foo, bar), then the keys in the dictionary should be \"foo\" and \"bar\". Values must be native Python objects.

Exactly one of url and args must be specified.

None return_pickled Optional[bool]

Whether the output should be a pickled python object, or directly returned serialized json.

False callback_url Optional[str]

The callback url to use for this task. If None, then the default_callback_url of the endpoint is used. The endpoint must specify \"callback\" as a post-inference hook for the callback to be triggered.

None callback_auth_kind Optional[Literal['basic', 'mtls']]

The default callback auth kind to use for async endpoints. Either \"basic\" or \"mtls\". This can be overridden in the task parameters for each individual task.

None callback_auth_username Optional[str]

The default callback auth username to use. This only applies if callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None callback_auth_password Optional[str]

The default callback auth password to use. This only applies if callback_auth_kind is \"basic\". This can be overridden in the task parameters for each individual task.

None callback_auth_cert Optional[str]

The default callback auth cert to use. This only applies if callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None callback_auth_key Optional[str]

The default callback auth key to use. This only applies if callback_auth_kind is \"mtls\". This can be overridden in the task parameters for each individual task.

None request_id Optional[str]

(deprecated) A user-specifiable id for requests. Should be unique among EndpointRequests made in the same batch call. If one isn't provided the client will generate its own.

None"},{"location":"concepts/endpoint_predictions/#launch.model_endpoint.EndpointResponseFuture","title":"EndpointResponseFuture","text":"
EndpointResponseFuture(client, endpoint_name: str, async_task_id: str)\n

Represents a future response from an Endpoint. Specifically, when the EndpointResponseFuture is ready, then its get method will return an actual instance of EndpointResponse.

This object should not be directly instantiated by the user.

Parameters:

Name Type Description Default client

An instance of LaunchClient.

required endpoint_name str

The name of the endpoint.

required async_task_id str

An async task id.

required"},{"location":"concepts/endpoint_predictions/#launch.model_endpoint.EndpointResponseFuture.get","title":"get","text":"
get(timeout: Optional[float] = None) -> EndpointResponse\n

Retrieves the EndpointResponse for the prediction request after it completes. This method blocks.

Parameters:

Name Type Description Default timeout Optional[float]

The maximum number of seconds to wait for the response. If None, then the method will block indefinitely until the response is ready.

None"},{"location":"concepts/endpoint_predictions/#launch.model_endpoint.EndpointResponse","title":"EndpointResponse","text":"
EndpointResponse(client, status: str, result_url: Optional[str] = None, result: Optional[str] = None, traceback: Optional[str] = None)\n

Represents a response received from an Endpoint.

Parameters:

Name Type Description Default client

An instance of LaunchClient.

required status str

A string representing the status of the request, i.e. SUCCESS, FAILURE, or PENDING

required result_url Optional[str]

A string that is a url containing the pickled python object from the Endpoint's predict function.

Exactly one of result_url or result will be populated, depending on the value of return_pickled in the request.

None result Optional[str]

A string that is the serialized return value (in json form) of the Endpoint's predict function. Specifically, one can json.loads() the value of result to get the original python object back.

Exactly one of result_url or result will be populated, depending on the value of return_pickled in the request.

None traceback Optional[str]

The stack trace if the inference endpoint raised an error. Can be used for debugging

None"},{"location":"concepts/endpoint_predictions/#launch.model_endpoint.EndpointResponseStream","title":"EndpointResponseStream","text":"
EndpointResponseStream(response)\n

Bases: Iterator

Represents a stream response from an Endpoint. This object is iterable and yields EndpointResponse objects.

This object should not be directly instantiated by the user.

"},{"location":"concepts/endpoint_predictions/#launch.model_endpoint.EndpointResponseStream.__iter__","title":"__iter__","text":"
__iter__()\n

Uses server-sent events to iterate through the stream.

"},{"location":"concepts/endpoint_predictions/#launch.model_endpoint.EndpointResponseStream.__next__","title":"__next__","text":"
__next__()\n

Uses server-sent events to iterate through the stream.

"},{"location":"concepts/model_bundles/","title":"Model Bundles","text":"

Model Bundles are deployable models that can be used to make predictions. They are created by packaging a model up into a deployable format.

"},{"location":"concepts/model_bundles/#creating-model-bundles","title":"Creating Model Bundles","text":"

There are five methods for creating model bundles: create_model_bundle_from_callable_v2, create_model_bundle_from_dirs_v2, create_model_bundle_from_runnable_image_v2, create_model_bundle_from_triton_enhanced_runnable_image_v2, and create_model_bundle_from_streaming_enhanced_runnable_image_v2.

The first directly pickles a user-specified load_predict_fn, a function which loads the model and returns a predict_fn, a function which takes in a request. The second takes in directories containing a load_predict_fn and the module path to the load_predict_fn. The third takes a Docker image and a command that starts a process listening for requests at port 5005 using HTTP and exposes POST /predict and GET /readyz endpoints. The fourth is a variant of the third that also starts an instance of the NVidia Triton framework for efficient model serving. The fifth is a variant of the third that responds with a stream of SSEs at POST /stream (the user can decide whether POST /predict is also exposed).

Each of these modes of creating a model bundle is called a \"Flavor\".

Info

Creating From CallablesCreating From DirectoriesCreating From a Runnable ImageCreating From a Triton Enhanced Runnable ImageCreating From a Streaming Enhanced Runnable Image
import os\nfrom pydantic import BaseModel\nfrom launch import LaunchClient\nclass MyRequestSchema(BaseModel):\nx: int\ny: str\nclass MyResponseSchema(BaseModel):\n__root__: int\ndef my_load_predict_fn(model):\ndef returns_model_of_x_plus_len_of_y(x: int, y: str) -> int:\n\"\"\"MyRequestSchema -> MyResponseSchema\"\"\"\nassert isinstance(x, int) and isinstance(y, str)\nreturn model(x) + len(y)\nreturn returns_model_of_x_plus_len_of_y\ndef my_load_model_fn():\ndef my_model(x):\nreturn x * 2\nreturn my_model\nBUNDLE_PARAMS = {\n\"model_bundle_name\": \"test-bundle\",\n\"load_model_fn\": my_load_model_fn,\n\"load_predict_fn\": my_load_predict_fn,\n\"request_schema\": MyRequestSchema,\n\"response_schema\": MyResponseSchema,\n\"requirements\": [\"pytest==7.2.1\", \"numpy\"],  # list your requirements here\n\"pytorch_image_tag\": \"1.7.1-cuda11.0-cudnn8-runtime\",\n}\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nclient.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS)\n
import os\nimport tempfile\nfrom pydantic import BaseModel\nfrom launch import LaunchClient\ndirectory = tempfile.mkdtemp()\nmodel_filename = os.path.join(directory, \"model.py\")\nwith open(model_filename, \"w\") as f:\nf.write(\"\"\"def my_load_model_fn(deserialized_config):\n    def my_model(x):\n        return x * 2\n    return my_model\n\"\"\")\npredict_filename = os.path.join(directory, \"predict.py\")\nwith open(predict_filename, \"w\") as f:\nf.write(\"\"\"def my_load_predict_fn(deserialized_config, model):\n    def returns_model_of_x_plus_len_of_y(x: int, y: str) -> int:\n        assert isinstance(x, int) and isinstance(y, str)\n        return model(x) + len(y)\n    return returns_model_of_x_plus_len_of_y\n\"\"\")\nrequirements_filename = os.path.join(directory, \"requirements.txt\")\nwith open(requirements_filename, \"w\") as f:\nf.write(\"\"\"\npytest==7.2.1\nnumpy\n\"\"\")\n\"\"\"\nThe directory structure should now look like\ndirectory/\n    model.py\n    predict.py\n    requirements.txt\n\"\"\"\nclass MyRequestSchema(BaseModel):\nx: int\ny: str\nclass MyResponseSchema(BaseModel):\n__root__: int\nprint(directory)\nprint(model_filename)\nprint(predict_filename)\nprint(requirements_filename)\nBUNDLE_PARAMS = {\n\"model_bundle_name\": \"test-bundle-from-dirs\",\n\"base_paths\": [directory],\n\"load_predict_fn_module_path\": f\"{os.path.basename(directory)}.predict.my_load_predict_fn\",\n\"load_model_fn_module_path\": f\"{os.path.basename(directory)}.model.my_load_model_fn\",\n\"request_schema\": MyRequestSchema,\n\"response_schema\": MyResponseSchema,\n\"requirements_path\": requirements_filename,\n\"pytorch_image_tag\": \"1.7.1-cuda11.0-cudnn8-runtime\",\n}\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nclient.create_model_bundle_from_dirs_v2(**BUNDLE_PARAMS)\n# Clean up files from demo\nos.remove(model_filename)\nos.remove(predict_filename)\nos.remove(requirements_filename)\nos.rmdir(directory)\n
import os\nfrom pydantic import BaseModel\nfrom launch import LaunchClient\nclass MyRequestSchema(BaseModel):\nx: int\ny: str\nclass MyResponseSchema(BaseModel):\n__root__: int\nBUNDLE_PARAMS = {\n\"model_bundle_name\": \"test-bundle\",\n\"request_schema\": MyRequestSchema,\n\"response_schema\": MyResponseSchema,\n\"repository\": \"...\",\n\"tag\": \"...\",\n\"command\": ...,\n\"predict_route\": \"/predict\",\n\"healthcheck_route\": \"/readyz\",\n\"env\": {\n\"TEST_KEY\": \"test_value\",\n},\n\"readiness_initial_delay_seconds\": 30,\n}\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nclient.create_model_bundle_from_runnable_image_v2(**BUNDLE_PARAMS)\n
import os\nfrom pydantic import BaseModel\nfrom launch import LaunchClient\nclass MyRequestSchema(BaseModel):\nx: int\ny: str\nclass MyResponseSchema(BaseModel):\n__root__: int\nBUNDLE_PARAMS = {\n\"model_bundle_name\": \"test-triton-bundle\",\n\"request_schema\": MyRequestSchema,\n\"response_schema\": MyResponseSchema,\n\"repository\": \"...\",\n\"tag\": \"...\",\n\"command\": ...,\n\"predict_route\": \"/predict\",\n\"healthcheck_route\": \"/readyz\",\n\"env\": {\n\"TEST_KEY\": \"test_value\",\n},\n\"readiness_initial_delay_seconds\": 30,\n\"triton_model_repository\": \"...\",\n\"triton_model_replicas\": {\"\": \"\"},\n\"triton_num_cpu\": 4.0,\n\"triton_commit_tag\": \"\",\n\"triton_storage\": \"\",\n\"triton_memory\": \"\",\n\"triton_readiness_initial_delay_seconds\": 300,\n}\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nclient.create_model_bundle_from_triton_enhanced_runnable_image_v2(**BUNDLE_PARAMS)\n
import os\nfrom pydantic import BaseModel\nfrom launch import LaunchClient\nclass MyRequestSchema(BaseModel):\nx: int\ny: str\nclass MyResponseSchema(BaseModel):\n__root__: int\nBUNDLE_PARAMS = {\n\"model_bundle_name\": \"test-streaming-bundle\",\n\"request_schema\": MyRequestSchema,\n\"response_schema\": MyResponseSchema,\n\"repository\": \"...\",\n\"tag\": \"...\",\n\"command\": ...,  # optional; if provided, will also expose the /predict endpoint\n\"predict_route\": \"/predict\",\n\"healthcheck_route\": \"/readyz\",\n\"streaming_command\": ...,  # required\n\"streaming_predict_route\": \"/stream\",\n\"env\": {\n\"TEST_KEY\": \"test_value\",\n},\n\"readiness_initial_delay_seconds\": 30,\n}\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nclient.create_model_bundle_from_streaming_enhanced_runnable_image_v2(**BUNDLE_PARAMS)\n
"},{"location":"concepts/model_bundles/#choosing-the-right-model-bundle-flavor","title":"Choosing the right model bundle flavor","text":"

Here are some tips for how to choose between the different flavors of ModelBundle:

A CloudpickleArtifactFlavor (creating from callable) is good if:

  • You are creating the model bundle from a Jupyter notebook.
  • The model bundle is small without too many dependencies.

A ZipArtifactFlavor (creating from directories) is good if:

  • You have a relatively constant set of dependencies.
  • You have a lot of custom code that you want to include in the model bundle.
  • You do not want to build a web server and Docker image to serve your model.

A RunnableImageFlavor (creating from runnable image) is good if:

  • You have a lot of dependencies.
  • You have a lot of custom code that you want to include in the model bundle.
  • You are comfortable with building a web server and Docker image to serve your model.

A TritonEnhancedRunnableImageFlavor (a runnable image variant) is good if:

  • You want to use a RunnableImageFlavor
  • You also want to use NVidia's tritonserver to accelerate model inference

A StreamingEnhancedRunnableImageFlavor (a runnable image variant) is good if:

  • You want to use a RunnableImageFlavor
  • You also want to support token streaming while the model is generating
"},{"location":"concepts/model_bundles/#configuring-model-bundles","title":"Configuring Model Bundles","text":"

The app_config field of a model bundle is a dictionary that can be used to configure the model bundle. If specified, the app_config is passed to the load_predict_fn when the model bundle is deployed, alongside the model. This can allow for more code reuse between multiple bundles that perform similar tasks.

Creating Model Bundles with app_config
import os\nfrom launch import LaunchClient\nfrom pydantic import BaseModel\nfrom typing import List, Union\nfrom typing_extensions import Literal\nclass MyRequestSchemaSingle(BaseModel):\nkind: Literal['single']\nx: int\ny: str\nclass MyRequestSchemaBatched(BaseModel):\nkind: Literal['batched']\nx: List[int]\ny: List[str]\nclass MyRequestSchema(BaseModel):\n__root__: Union[MyRequestSchemaSingle, MyRequestSchemaBatched]\nclass MyResponseSchema(BaseModel):\n__root__: Union[int, List[int]]\ndef my_load_predict_fn(app_config, model):\ndef returns_model_of_x_plus_len_of_y(x: Union[int, List[int]], y: Union[str, List[str]]) -> Union[int, List[int]]:\n\"\"\"MyRequestSchema -> MyResponseSchema\"\"\"\nif app_config[\"mode\"] == \"single\":\nassert isinstance(x, int) and isinstance(y, str)\nreturn model(x) + len(y)\nresult = []\nfor x_i, y_i in zip(x, y):\nresult.append(model(x_i) + len(y_i))\nreturn result\nreturn returns_model_of_x_plus_len_of_y\ndef my_load_model_fn(app_config):\ndef my_model_single(x: int):\nreturn x * 2\ndef my_model_batched(x: List[int]):\nreturn [my_model_single(x_i) for x_i in x]\nif app_config[\"mode\"] == \"single\":\nreturn my_model_single\nreturn my_model_batched\nBUNDLE_PARAMS_SINGLE = {\n\"model_bundle_name\": \"test-bundle-single\",\n\"load_predict_fn\": my_load_predict_fn,\n\"load_model_fn\": my_load_model_fn,\n\"requirements\": [\"pytest==7.2.1\", \"numpy\"],\n\"request_schema\": MyRequestSchema,\n\"response_schema\": MyResponseSchema,\n\"pytorch_image_tag\": \"1.7.1-cuda11.0-cudnn8-runtime\",\n\"app_config\": {\"mode\": \"single\"},\n}\nBUNDLE_PARAMS_BATCHED = {\n\"model_bundle_name\": \"test-bundle-batched\",\n\"load_predict_fn\": my_load_predict_fn,\n\"load_model_fn\": my_load_model_fn,\n\"requirements\": [\"pytest==7.2.1\", \"numpy\"],\n\"request_schema\": MyRequestSchema,\n\"response_schema\": MyResponseSchema,\n\"pytorch_image_tag\": \"1.7.1-cuda11.0-cudnn8-runtime\",\n\"app_config\": {\"mode\": \"batched\"},\n}\nclient = 
LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nbundle_single = client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS_SINGLE)\nbundle_batch = client.create_model_bundle_from_callable_v2(**BUNDLE_PARAMS_BATCHED)\n
"},{"location":"concepts/model_bundles/#updating-model-bundles","title":"Updating Model Bundles","text":"

Model Bundles are immutable, meaning they cannot be edited once created. However, it is possible to clone an existing model bundle with a new app_config using clone_model_bundle_with_changes_v2.

"},{"location":"concepts/model_bundles/#listing-model-bundles","title":"Listing Model Bundles","text":"

To list all the model bundles you own, use list_model_bundles_v2.

"},{"location":"concepts/model_endpoints/","title":"Model Endpoints","text":"

Model Endpoints are deployments of models that can receive requests and return predictions containing the results of the model's inference. Each model endpoint is associated with a model bundle, which contains the model's code. An endpoint specifies deployment parameters, such as the minimum and maximum number of workers, as well as the requested resources for each worker, such as the number of CPUs, amount of memory, GPU count, and type of GPU.

Endpoints can be asynchronous, synchronous, or streaming. Asynchronous endpoints return a future immediately after receiving a request, and the future can be used to retrieve the prediction once it is ready. Synchronous endpoints return the prediction directly after receiving a request. Streaming endpoints are variants of synchronous endpoints that return a stream of SSEs instead of a single HTTP response.

Info

"},{"location":"concepts/model_endpoints/#choosing-the-right-inference-mode","title":"Choosing the right inference mode","text":"

Here are some tips for how to choose between SyncEndpoint, StreamingEndpoint, AsyncEndpoint, and BatchJob for deploying your ModelBundle:

A SyncEndpoint is good if:

  • You have strict latency requirements (e.g. on the order of seconds or less).
  • You are willing to have resources continually allocated.

A StreamingEndpoint is good if:

  • You have stricter requirements on perceived latency than SyncEndpoint can support (e.g. you want tokens generated by the model to start being returned almost immediately rather than waiting for the model generation to finish).
  • You are willing to have resources continually allocated.

An AsyncEndpoint is good if:

  • You want to save on compute costs.
  • Your inference code takes a long time to run.
  • Your latency requirements are on the order of minutes.

A BatchJob is good if:

  • You know there is a large batch of inputs ahead of time.
  • You want to optimize for throughput instead of latency.
"},{"location":"concepts/model_endpoints/#creating-async-model-endpoints","title":"Creating Async Model Endpoints","text":"

Async model endpoints are the most cost-efficient way to perform inference on tasks that are less latency-sensitive.

Creating an Async Model Endpoint
import os\nfrom launch import LaunchClient\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoint = client.create_model_endpoint(\nendpoint_name=\"demo-endpoint-async\",\nmodel_bundle=\"test-bundle\",\ncpus=1,\nmin_workers=0,\nendpoint_type=\"async\",\nupdate_if_exists=True,\nlabels={\n\"team\": \"MY_TEAM\",\n\"product\": \"MY_PRODUCT\",\n},\n)\n
"},{"location":"concepts/model_endpoints/#creating-sync-model-endpoints","title":"Creating Sync Model Endpoints","text":"

Sync model endpoints are useful for latency-sensitive tasks, such as real-time inference. Sync endpoints are more expensive than async endpoints.

Note

Sync model endpoints require at least 1 min_worker.

Creating a Sync Model Endpoint
import os\nfrom launch import LaunchClient\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoint = client.create_model_endpoint(\nendpoint_name=\"demo-endpoint-sync\",\nmodel_bundle=\"test-bundle\",\ncpus=1,\nmin_workers=1,\nendpoint_type=\"sync\",\nupdate_if_exists=True,\nlabels={\n\"team\": \"MY_TEAM\",\n\"product\": \"MY_PRODUCT\",\n},\n)\n
"},{"location":"concepts/model_endpoints/#creating-streaming-model-endpoints","title":"Creating Streaming Model Endpoints","text":"

Streaming model endpoints are variants of sync model endpoints that are useful for tasks with strict requirements on perceived latency. Streaming endpoints are more expensive than async endpoints.

Note

Streaming model endpoints require at least 1 min_worker.

Creating a Streaming Model Endpoint
import os\nfrom launch import LaunchClient\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoint = client.create_model_endpoint(\nendpoint_name=\"demo-endpoint-streaming\",\nmodel_bundle=\"test-streaming-bundle\",\ncpus=1,\nmin_workers=1,\nper_worker=1,\nendpoint_type=\"streaming\",\nupdate_if_exists=True,\nlabels={\n\"team\": \"MY_TEAM\",\n\"product\": \"MY_PRODUCT\",\n},\n)\n
"},{"location":"concepts/model_endpoints/#managing-model-endpoints","title":"Managing Model Endpoints","text":"

Model endpoints can be listed, updated, and deleted using the Launch API.

Listing Model Endpoints
import os\nfrom launch import LaunchClient\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoints = client.list_model_endpoints()\n
Updating a Model Endpoint
import os\nfrom launch import LaunchClient\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nclient.edit_model_endpoint(\nmodel_endpoint=\"demo-endpoint-sync\",\nmax_workers=2,\n)\n
Deleting a Model Endpoint
import time\nimport os\nfrom launch import LaunchClient\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nendpoint = client.create_model_endpoint(\nendpoint_name=\"demo-endpoint-tmp\",\nmodel_bundle=\"test-bundle\",\ncpus=1,\nmin_workers=0,\nendpoint_type=\"async\",\nupdate_if_exists=True,\nlabels={\n\"team\": \"MY_TEAM\",\n\"product\": \"MY_PRODUCT\",\n},\n)\ntime.sleep(15)  # Wait for Launch to build the endpoint\nclient.delete_model_endpoint(model_endpoint_name=\"demo-endpoint-tmp\")\n
"},{"location":"concepts/overview/","title":"Overview","text":"

Creating deployments on Launch generally involves three steps:

  1. Create and upload a ModelBundle. Pass your trained model as well as pre-/post-processing code to the Scale Launch Python client, and we\u2019ll create a model bundle based on the code and store it in our Bundle Store.

  2. Create a ModelEndpoint. Pass a ModelBundle as well as infrastructure settings such as the desired number of GPUs to our client. This provisions resources on Scale\u2019s cluster dedicated to your ModelEndpoint.

  3. Make requests to the ModelEndpoint. You can make requests through the Python client, or make HTTP requests directly to Scale.

"},{"location":"guides/custom_docker_images/","title":"Custom docker images","text":"

Warning

This feature is currently in beta, and the API is likely to change. Please contact us if you are interested in using this feature.

If you need more customization that what cloudpickle or zip artifacts can offer, or if you just already have a pre-built docker image, then you can create a Model Bundle with that docker image. You will need to modify your image to run a web server that exposes HTTP port 5005.

In our example below, we assume that you have some existing Python function my_inference_fn that can be imported. If you need to invoke some other binary (e.g. a custom C++ binary), then you can shell out to the OS to call that binary; subsequent versions of this document will have native examples for non-Python binaries.

For choice of web server, we recommend FastAPI due to its speed and ergonomics. Any web server would work, although we give examples with FastAPI.

"},{"location":"guides/custom_docker_images/#step-1-install-requirements","title":"Step 1: Install Requirements","text":"

You can add fastapi and uvicorn to the requirements.txt file that gets installed as part of your Dockerfile. Alternatively, you can add pip install fastapi uvicorn to the Dockerfile directly.

"},{"location":"guides/custom_docker_images/#step-2-set-up-a-web-server-application","title":"Step 2: Set up a web server application","text":"

Inside your project workspace, create a server.py file with these contents:

# test='skip'\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\napp = FastAPI()\nclass MyRequestSchema(BaseModel):\nurl: str\nclass MyResponseSchema(BaseModel):\nresponse: str\ndef my_inference_fn(req: MyRequestSchema) -> MyResponseSchema:\n# This is an example inference function - you can instead import a function from your own codebase,\n# or shell out to the OS, etc.\nresp = req.url + \"_hello\"\nreturn MyResponseSchema(response=resp)\n@app.post(\"/predict\")\nasync def predict(request: MyRequestSchema) -> MyResponseSchema:\nresponse = my_inference_fn(request)\nreturn response\n@app.get(\"/readyz\")\ndef readyz():\nreturn \"ok\"\n
"},{"location":"guides/custom_docker_images/#step-3-rebuild-and-push-your-image","title":"Step 3: Rebuild and push your image","text":"

Build your updated Dockerfile and push the image to a location that is accessible by Scale. For instance, if you are using AWS ECR, please make sure that the necessary cross-account permissions allow Scale to pull your docker image.

"},{"location":"guides/custom_docker_images/#step-4-deploy","title":"Step 4: Deploy!","text":"

Now you can upload your docker image as a Model Bundle, and then create a Model Endpoint referencing that Model Bundle. Note that path.to.your.server.file:app in the command section below should be relative to the WORKDIR of your docker image.

# test='skip'\nimport os\nfrom launch import LaunchClient\nfrom server import MyRequestSchema, MyResponseSchema  # Defined as part of your server.py\nclient = LaunchClient(api_key=os.getenv(\"LAUNCH_API_KEY\"))\nmodel_bundle_name = \"my_bundle_name\"\nclient.create_model_bundle_from_runnable_image_v2(\nmodel_bundle_name=model_bundle_name,\nrequest_schema=MyRequestSchema,\nresponse_schema=MyResponseSchema,\nrepository=\"$YOUR_ECR_REPO\",\ntag=\"$YOUR_IMAGE_TAG\",\ncommand=[\n\"dumb-init\",\n\"--\",\n\"uvicorn\",\n\"path.to.your.server.file:app\",\n\"--port\",\n\"5005\",\n\"--host\",\n\"::\",\n],\npredict_route=\"/predict\",\nhealthcheck_route=\"/readyz\",\nreadiness_initial_delay_seconds=120,\nenv={},\n)\nclient.create_model_endpoint(\nendpoint_name=f\"endpoint-{model_bundle_name}\",\nmodel_bundle=model_bundle_name,\nendpoint_type=\"async\",\nmin_workers=0,\nmax_workers=1,\nper_worker=1,\nmemory=\"30Gi\",\nstorage=\"40Gi\",\ncpus=4, # This must  be at least 2 because forwarding services consume 1 cpu.\ngpus=1,\ngpu_type=\"nvidia-ampere-a10\",\nupdate_if_exists=True,\n)\n
"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..7b5d1a63 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,78 @@ + + + + https://scaleapi.github.io/launch-python-client/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/cli/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/api/client/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/api/endpoint_predictions/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/api/hooks/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/api/llms/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/api/model_bundles/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/api/model_endpoints/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/concepts/batch_jobs/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/concepts/callbacks/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/concepts/endpoint_predictions/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/concepts/model_bundles/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/concepts/model_endpoints/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/concepts/overview/ + 2024-05-30 + daily + + + https://scaleapi.github.io/launch-python-client/guides/custom_docker_images/ + 2024-05-30 + daily + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 00000000..b90c928e Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/src_docs/_static/css/custom.css b/src_docs/_static/css/custom.css deleted file mode 100644 index 1c0083c6..00000000 --- a/src_docs/_static/css/custom.css +++ /dev/null @@ -1,5 +0,0 @@ -@import 
url('https://fonts.googleapis.com/css2?family=IBM+Plex+Sans:wght@300;400;500;700&display=swap'); - -body { - font-family: 'IBM Plex Sans', sans-serif; -} diff --git a/src_docs/conf.py b/src_docs/conf.py deleted file mode 100644 index 96ef3f49..00000000 --- a/src_docs/conf.py +++ /dev/null @@ -1,89 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys - -sys.path.insert(0, os.path.abspath("../../")) - - -# -- Project information ----------------------------------------------------- - -project = "Launch" -copyright = "2022, Scale" -author = "Scale" - -# The full version, including alpha/beta/rc tags -from launch import __version__ # noqa: E402 - -release = "v" + str(__version__) - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx_click", - "autoapi.extension", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. 
-exclude_patterns = [] - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_title = "Launch API Reference" -html_theme = "furo" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] -html_css_files = [ - "css/custom.css", -] -html_favicon = "favicon-32x32.png" - -html_logo = "launch-logo.svg" -html_theme_options = {} - -# -- autogen configuration --------------------------------------------------- -autoapi_type = "python" -autoapi_dirs = ["../launch"] -autoapi_options = [ - "members", - "no-undoc-members", - "inherited-members", - "show-module-summary", - "imported-members", -] -autoapi_template_dir = "_templates" -autoapi_root = "api" -autoapi_python_class_content = "both" -autoapi_member_order = "groupwise" -autodoc_typehints = "description" -autoapi_add_toctree_entry = False -napoleon_include_init_with_doc = True diff --git a/src_docs/favicon-32x32.png b/src_docs/favicon-32x32.png deleted file mode 100644 index 52c5a605..00000000 Binary files a/src_docs/favicon-32x32.png and /dev/null differ diff --git a/src_docs/guides/deploying_your_code.rst b/src_docs/guides/deploying_your_code.rst deleted file mode 100644 index eafc60fc..00000000 --- a/src_docs/guides/deploying_your_code.rst +++ /dev/null @@ -1,3 +0,0 @@ -Deploying your code -=================== -Endpoints guide diff --git a/src_docs/guides/getting_started.rst b/src_docs/guides/getting_started.rst deleted file mode 100644 index 533b73d0..00000000 --- a/src_docs/guides/getting_started.rst +++ /dev/null @@ -1,100 +0,0 @@ -Introduction to Scale Launch -============================ - -Launch has three core concepts: - -- ``ModelBundle`` -- ``ModelEndpoint`` 
-- ``BatchJob`` - -``ModelBundle`` represents your model & code, and is what you're deploying; -``ModelEndpoint`` and ``BatchJob`` represent the means of how to deploy it. - -ModelBundle ------------ -A ``ModelBundle`` consists of a trained model as well as the surrounding preprocessing and postprocessing code. -Specifically, a ``ModelBundle`` consists of two Python objects, a ``load_predict_fn``, and either a ``model`` or ``load_model_fn``; such that - - - load_predict_fn(model) - - -or - - - load_predict_fn(load_model_fn()) - - -returns a function ``predict_fn`` that takes in one argument representing model input, -and outputs one argument representing model output. - -Typically, a ``model`` would be a Pytorch nn.Module or Tensorflow model, but can also be any arbitrary Python code. - -.. image:: /../src_docs/images/model_bundle.png - :width: 200px - -ModelEndpoint -------------- -A ``ModelEndpoint`` is a deployment of a ``ModelBundle`` that serves inference requests. -To create a ``ModelEndpoint``, the user must specify various infrastructure-level details, -such as the min & max workers; and the amount of CPU, memory, and GPU resources per worker. A ``ModelEndpoint`` -automatically scales the number of workers based on the amount of traffic. - -.. image:: /../src_docs/images/model_endpoint.png - :width: 400px - -There are two types of ``ModelEndpoint``: - -A ``SyncEndpoint`` takes in requests and immediately returns the response in a blocking manner. -A ``SyncEndpoint`` must always have at least 1 worker. - -An ``AsyncEndpoint`` takes in requests and returns an asynchronous response token. The user can later query to monitor -the status of the request. Asynchronous endpoints can scale up from zero, -which make them a cost effective choice for services that are not latency sensitive. - -BatchJob --------- -A ``BatchJob`` takes in a ``ModelBundle`` and a list of inputs -to predict on. 
Once a batch job completes, it cannot be restarted or accept additional requests. -Launch maintains metadata about batch jobs for users to query, even after batch jobs are complete. - -Choosing the right inference mode ---------------------------------- -Here are some tips for how to choose between ``SyncEndpoint``, ``AsyncEndpoint``, and ``BatchJob`` for deploying your -``ModelBundle``: - -A ``SyncEndpoint`` is good if: - -- You have strict latency requirements (e.g. on the order of seconds or less). - -- You are willing to have resources continually allocated. - -An ``AsyncEndpoint`` is good if: - -- You want to save on compute costs. - -- Your inference code takes a long time to run. - -- Your latency requirements are on the order of minutes. - -A ``BatchJob`` is good if: - -- You know there is a large batch of inputs ahead of time. - -- You want to optimize for throughput instead of latency. - -Overview of deployment steps ----------------------------- -1. Create and upload a ``ModelBundle``. Pass your trained model as well as pre-/post-processing code to -the Scale Launch Python client, and we'll create a model bundle based on the code and store it in our Bundle Store. - -2. Create a ``ModelEndpoint``. Pass a ``ModelBundle`` as well as infrastructure settings such as #GPUs to our client. -This provisions resources on Scale's cluster dedicated to your ``ModelEndpoint``. - -3. Make requests to the ``ModelEndpoint``. You can make requests through the Python client, or make HTTP requests directly -to Scale. - -See the Guides section for more detailed instructions. - -.. image:: /../src_docs/images/request_lifecycle.png - :width: 400px diff --git a/src_docs/guides/index.rst b/src_docs/guides/index.rst deleted file mode 100644 index 179bbbd8..00000000 --- a/src_docs/guides/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -How-to guides -============= - -.. 
toctree:: - getting_started - registering_your_code - deploying_your_code - integrations - troubleshooting diff --git a/src_docs/guides/integrations.rst b/src_docs/guides/integrations.rst deleted file mode 100644 index 5dbdd531..00000000 --- a/src_docs/guides/integrations.rst +++ /dev/null @@ -1,15 +0,0 @@ -Integrations -============ - -Integration with Scale Train ----------------------------- -TODO - -Integration with Scale Insight ------------------------------- -TODO - -Integration with Scale Nucleus ------------------------------- -TODO - link to Nucleus docs - diff --git a/src_docs/guides/registering_your_code.rst b/src_docs/guides/registering_your_code.rst deleted file mode 100644 index e81c1442..00000000 --- a/src_docs/guides/registering_your_code.rst +++ /dev/null @@ -1,3 +0,0 @@ -Registering your code -===================== -Bundles guide diff --git a/src_docs/guides/troubleshooting.rst b/src_docs/guides/troubleshooting.rst deleted file mode 100644 index aa8d6265..00000000 --- a/src_docs/guides/troubleshooting.rst +++ /dev/null @@ -1,14 +0,0 @@ -Troubleshooting -=============== - -Bundles -------- -Test - -Endpoints ---------- -Test - -Batch jobs ----------- -Test diff --git a/src_docs/images/model_bundle.png b/src_docs/images/model_bundle.png deleted file mode 100644 index 75667ae0..00000000 Binary files a/src_docs/images/model_bundle.png and /dev/null differ diff --git a/src_docs/images/model_endpoint.png b/src_docs/images/model_endpoint.png deleted file mode 100644 index f3bc7183..00000000 Binary files a/src_docs/images/model_endpoint.png and /dev/null differ diff --git a/src_docs/images/request_lifecycle.png b/src_docs/images/request_lifecycle.png deleted file mode 100644 index a2382720..00000000 Binary files a/src_docs/images/request_lifecycle.png and /dev/null differ diff --git a/src_docs/index.rst b/src_docs/index.rst deleted file mode 100644 index b052d408..00000000 --- a/src_docs/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -Welcome to the Launch API 
Reference! -===================================== - -Scale Launch provides ML engineers a simple Python interface for turning a local code snippet into a production service. - - -.. _installation: -Installation ------------- - -To use Scale Launch, first install it using `pip`: - -.. code-block:: console - - (venv) $ pip install git+https://github.com/scaleapi/launch-python-client.git - -Note that launch-python-client is a private library and can only be installed directly from the repository. - -.. _api: -Sections --------- - -.. toctree:: - :maxdepth: 4 - - api/launch/index - guides/index - - -Index ------ - -* :ref:`genindex` diff --git a/src_docs/launch-logo.svg b/src_docs/launch-logo.svg deleted file mode 100644 index 1e9a4431..00000000 --- a/src_docs/launch-logo.svg +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git a/test/test_paths/__init__.py b/test/test_paths/__init__.py deleted file mode 100644 index 02b36375..00000000 --- a/test/test_paths/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -import json -import typing - -import urllib3 -from urllib3._collections import HTTPHeaderDict - - -class ApiTestMixin: - json_content_type = "application/json" - user_agent = "OpenAPI-Generator/1.0.0/python" - - @classmethod - def assert_pool_manager_request_called_with( - cls, - mock_request, - url: str, - method: str = "POST", - body: typing.Optional[bytes] = None, - content_type: typing.Optional[str] = None, - accept_content_type: typing.Optional[str] = None, - stream: bool = False, - ): - headers = {"User-Agent": cls.user_agent} - if accept_content_type: - headers["Accept"] = accept_content_type - if content_type: - headers["Content-Type"] = content_type - kwargs = dict( - headers=HTTPHeaderDict(headers), - preload_content=not stream, - timeout=None, - ) - if content_type and method != "GET": - kwargs["body"] = body - mock_request.assert_called_with(method, url, **kwargs) - - @staticmethod - def headers_for_content_type(content_type: str) -> 
typing.Dict[str, str]: - return {"content-type": content_type} - - @classmethod - def response( - cls, - body: typing.Union[str, bytes], - status: int = 200, - content_type: str = json_content_type, - headers: typing.Optional[typing.Dict[str, str]] = None, - preload_content: bool = True, - ) -> urllib3.HTTPResponse: - if headers is None: - headers = {} - headers.update(cls.headers_for_content_type(content_type)) - return urllib3.HTTPResponse(body, headers=headers, status=status, preload_content=preload_content) - - @staticmethod - def json_bytes(in_data: typing.Any) -> bytes: - return json.dumps(in_data, separators=(",", ":"), ensure_ascii=False).encode("utf-8") diff --git a/test/test_paths/test_healthcheck/__init__.py b/test/test_paths/test_healthcheck/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_healthcheck/test_get.py b/test/test_paths/test_healthcheck/test_get.py deleted file mode 100644 index 369e5a38..00000000 --- a/test/test_paths/test_healthcheck/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.healthcheck import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestHealthcheck(ApiTestMixin, unittest.TestCase): - """ - Healthcheck unit test stubs - Healthcheck # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_healthz/__init__.py b/test/test_paths/test_healthz/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_healthz/test_get.py b/test/test_paths/test_healthz/test_get.py deleted file mode 100644 index a4be4c09..00000000 --- a/test/test_paths/test_healthz/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.healthz import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestHealthz(ApiTestMixin, unittest.TestCase): - """ - Healthz unit test stubs - Healthcheck # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_readyz/__init__.py b/test/test_paths/test_readyz/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_readyz/test_get.py b/test/test_paths/test_readyz/test_get.py deleted file mode 100644 index 8876d438..00000000 --- a/test/test_paths/test_readyz/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.readyz import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestReadyz(ApiTestMixin, unittest.TestCase): - """ - Readyz unit test stubs - Healthcheck # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_async_tasks/__init__.py b/test/test_paths/test_v1_async_tasks/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_async_tasks/test_post.py b/test/test_paths/test_v1_async_tasks/test_post.py deleted file mode 100644 index af187d92..00000000 --- a/test/test_paths/test_v1_async_tasks/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_async_tasks import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1AsyncTasks(ApiTestMixin, unittest.TestCase): - """ - V1AsyncTasks unit test stubs - Create Async Inference Task # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_async_tasks_task_id/__init__.py b/test/test_paths/test_v1_async_tasks_task_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_async_tasks_task_id/test_get.py b/test/test_paths/test_v1_async_tasks_task_id/test_get.py deleted file mode 100644 index 99311617..00000000 --- a/test/test_paths/test_v1_async_tasks_task_id/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_async_tasks_task_id import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1AsyncTasksTaskId(ApiTestMixin, unittest.TestCase): - """ - V1AsyncTasksTaskId unit test stubs - Get Async Inference Task # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_batch_jobs/__init__.py b/test/test_paths/test_v1_batch_jobs/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_batch_jobs/test_post.py b/test/test_paths/test_v1_batch_jobs/test_post.py deleted file mode 100644 index 5edcd2ab..00000000 --- a/test/test_paths/test_v1_batch_jobs/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_batch_jobs import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1BatchJobs(ApiTestMixin, unittest.TestCase): - """ - V1BatchJobs unit test stubs - Create Batch Job # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_batch_jobs_batch_job_id/__init__.py b/test/test_paths/test_v1_batch_jobs_batch_job_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_batch_jobs_batch_job_id/test_get.py b/test/test_paths/test_v1_batch_jobs_batch_job_id/test_get.py deleted file mode 100644 index 2ba46baa..00000000 --- a/test/test_paths/test_v1_batch_jobs_batch_job_id/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_batch_jobs_batch_job_id import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1BatchJobsBatchJobId(ApiTestMixin, unittest.TestCase): - """ - V1BatchJobsBatchJobId unit test stubs - Get Batch Job # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_batch_jobs_batch_job_id/test_put.py b/test/test_paths/test_v1_batch_jobs_batch_job_id/test_put.py deleted file mode 100644 index c07ed01b..00000000 --- a/test/test_paths/test_v1_batch_jobs_batch_job_id/test_put.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_batch_jobs_batch_job_id import ( # noqa: E501 - put, -) - -from .. 
import ApiTestMixin - - -class TestV1BatchJobsBatchJobId(ApiTestMixin, unittest.TestCase): - """ - V1BatchJobsBatchJobId unit test stubs - Update Batch Job # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles/__init__.py b/test/test_paths/test_v1_docker_image_batch_job_bundles/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles/test_get.py b/test/test_paths/test_v1_docker_image_batch_job_bundles/test_get.py deleted file mode 100644 index 2afaed5f..00000000 --- a/test/test_paths/test_v1_docker_image_batch_job_bundles/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_docker_image_batch_job_bundles import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1DockerImageBatchJobBundles(ApiTestMixin, unittest.TestCase): - """ - V1DockerImageBatchJobBundles unit test stubs - List Docker Image Batch Job Model Bundles # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles/test_post.py b/test/test_paths/test_v1_docker_image_batch_job_bundles/test_post.py deleted file mode 100644 index a93143e9..00000000 --- a/test/test_paths/test_v1_docker_image_batch_job_bundles/test_post.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_docker_image_batch_job_bundles import ( # noqa: E501 - post, -) - -from .. 
import ApiTestMixin - - -class TestV1DockerImageBatchJobBundles(ApiTestMixin, unittest.TestCase): - """ - V1DockerImageBatchJobBundles unit test stubs - Create Docker Image Batch Job Bundle # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py b/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/test_get.py b/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/test_get.py deleted file mode 100644 index fd33431b..00000000 --- a/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1DockerImageBatchJobBundlesDockerImageBatchJobBundleId(ApiTestMixin, unittest.TestCase): - """ - V1DockerImageBatchJobBundlesDockerImageBatchJobBundleId unit test stubs - Get Docker Image Batch Job Model Bundle # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/__init__.py b/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/test_get.py b/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/test_get.py deleted file mode 100644 index e82be891..00000000 --- a/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_docker_image_batch_job_bundles_latest import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1DockerImageBatchJobBundlesLatest(ApiTestMixin, unittest.TestCase): - """ - V1DockerImageBatchJobBundlesLatest unit test stubs - Get Latest Docker Image Batch Job Bundle # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_jobs/__init__.py b/test/test_paths/test_v1_docker_image_batch_jobs/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_docker_image_batch_jobs/test_get.py b/test/test_paths/test_v1_docker_image_batch_jobs/test_get.py deleted file mode 100644 index 1ac9f998..00000000 --- a/test/test_paths/test_v1_docker_image_batch_jobs/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_docker_image_batch_jobs import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1DockerImageBatchJobs(ApiTestMixin, unittest.TestCase): - """ - V1DockerImageBatchJobs unit test stubs - List Docker Image Batch Jobs # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_jobs/test_post.py b/test/test_paths/test_v1_docker_image_batch_jobs/test_post.py deleted file mode 100644 index 86b27b1a..00000000 --- a/test/test_paths/test_v1_docker_image_batch_jobs/test_post.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_docker_image_batch_jobs import ( # noqa: E501 - post, -) - -from .. 
import ApiTestMixin - - -class TestV1DockerImageBatchJobs(ApiTestMixin, unittest.TestCase): - """ - V1DockerImageBatchJobs unit test stubs - Create Docker Image Batch Job # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/__init__.py b/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_get.py b/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_get.py deleted file mode 100644 index 1e507744..00000000 --- a/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_docker_image_batch_jobs_batch_job_id import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1DockerImageBatchJobsBatchJobId(ApiTestMixin, unittest.TestCase): - """ - V1DockerImageBatchJobsBatchJobId unit test stubs - Get Docker Image Batch Job # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_put.py b/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_put.py deleted file mode 100644 index 3e08b35d..00000000 --- a/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_put.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_docker_image_batch_jobs_batch_job_id import ( # noqa: E501 - put, -) - -from .. 
import ApiTestMixin - - -class TestV1DockerImageBatchJobsBatchJobId(ApiTestMixin, unittest.TestCase): - """ - V1DockerImageBatchJobsBatchJobId unit test stubs - Update Docker Image Batch Job # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_files/__init__.py b/test/test_paths/test_v1_files/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_files/test_get.py b/test/test_paths/test_v1_files/test_get.py deleted file mode 100644 index 5232da4a..00000000 --- a/test/test_paths/test_v1_files/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_files import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1Files(ApiTestMixin, unittest.TestCase): - """ - V1Files unit test stubs - List Files # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_files/test_post.py b/test/test_paths/test_v1_files/test_post.py deleted file mode 100644 index 32a1f002..00000000 --- a/test/test_paths/test_v1_files/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_files import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1Files(ApiTestMixin, unittest.TestCase): - """ - V1Files unit test stubs - Upload File # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_files_file_id/__init__.py b/test/test_paths/test_v1_files_file_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_files_file_id/test_delete.py b/test/test_paths/test_v1_files_file_id/test_delete.py deleted file mode 100644 index f87b940f..00000000 --- a/test/test_paths/test_v1_files_file_id/test_delete.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_files_file_id import delete # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1FilesFileId(ApiTestMixin, unittest.TestCase): - """ - V1FilesFileId unit test stubs - Delete File # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = delete.ApiFordelete(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_files_file_id/test_get.py b/test/test_paths/test_v1_files_file_id/test_get.py deleted file mode 100644 index 63e5f30e..00000000 --- a/test/test_paths/test_v1_files_file_id/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_files_file_id import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1FilesFileId(ApiTestMixin, unittest.TestCase): - """ - V1FilesFileId unit test stubs - Get File # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_files_file_id_content/__init__.py b/test/test_paths/test_v1_files_file_id_content/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_files_file_id_content/test_get.py b/test/test_paths/test_v1_files_file_id_content/test_get.py deleted file mode 100644 index 530b06cb..00000000 --- a/test/test_paths/test_v1_files_file_id_content/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_files_file_id_content import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1FilesFileIdContent(ApiTestMixin, unittest.TestCase): - """ - V1FilesFileIdContent unit test stubs - Get File Content # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_batch_completions/__init__.py b/test/test_paths/test_v1_llm_batch_completions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_batch_completions/test_post.py b/test/test_paths/test_v1_llm_batch_completions/test_post.py deleted file mode 100644 index cd9ae6e0..00000000 --- a/test/test_paths/test_v1_llm_batch_completions/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_batch_completions import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1LlmBatchCompletions(ApiTestMixin, unittest.TestCase): - """ - V1LlmBatchCompletions unit test stubs - Create Batch Completions # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_completions_stream/__init__.py b/test/test_paths/test_v1_llm_completions_stream/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_completions_stream/test_post.py b/test/test_paths/test_v1_llm_completions_stream/test_post.py deleted file mode 100644 index a1416702..00000000 --- a/test/test_paths/test_v1_llm_completions_stream/test_post.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_completions_stream import ( # noqa: E501 - post, -) - -from .. 
import ApiTestMixin - - -class TestV1LlmCompletionsStream(ApiTestMixin, unittest.TestCase): - """ - V1LlmCompletionsStream unit test stubs - Create Completion Stream Task # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_completions_sync/__init__.py b/test/test_paths/test_v1_llm_completions_sync/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_completions_sync/test_post.py b/test/test_paths/test_v1_llm_completions_sync/test_post.py deleted file mode 100644 index 2b04e8ca..00000000 --- a/test/test_paths/test_v1_llm_completions_sync/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_completions_sync import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1LlmCompletionsSync(ApiTestMixin, unittest.TestCase): - """ - V1LlmCompletionsSync unit test stubs - Create Completion Sync Task # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes/__init__.py b/test/test_paths/test_v1_llm_fine_tunes/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_fine_tunes/test_get.py b/test/test_paths/test_v1_llm_fine_tunes/test_get.py deleted file mode 100644 index eab298e4..00000000 --- a/test/test_paths/test_v1_llm_fine_tunes/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_fine_tunes import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1LlmFineTunes(ApiTestMixin, unittest.TestCase): - """ - V1LlmFineTunes unit test stubs - List Fine Tunes # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes/test_post.py b/test/test_paths/test_v1_llm_fine_tunes/test_post.py deleted file mode 100644 index f0cc0e07..00000000 --- a/test/test_paths/test_v1_llm_fine_tunes/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_fine_tunes import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1LlmFineTunes(ApiTestMixin, unittest.TestCase): - """ - V1LlmFineTunes unit test stubs - Create Fine Tune # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/__init__.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/test_get.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/test_get.py deleted file mode 100644 index 4d45ad16..00000000 --- a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1LlmFineTunesFineTuneId(ApiTestMixin, unittest.TestCase): - """ - V1LlmFineTunesFineTuneId unit test stubs - Get Fine Tune # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/test_put.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/test_put.py deleted file mode 100644 index 0c962326..00000000 --- a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/test_put.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_cancel import ( # noqa: E501 - put, -) - -from .. 
import ApiTestMixin - - -class TestV1LlmFineTunesFineTuneIdCancel(ApiTestMixin, unittest.TestCase): - """ - V1LlmFineTunesFineTuneIdCancel unit test stubs - Cancel Fine Tune # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/__init__.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/test_get.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/test_get.py deleted file mode 100644 index d5470fef..00000000 --- a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_events import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1LlmFineTunesFineTuneIdEvents(ApiTestMixin, unittest.TestCase): - """ - V1LlmFineTunesFineTuneIdEvents unit test stubs - Get Fine Tune Events # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints/__init__.py b/test/test_paths/test_v1_llm_model_endpoints/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_model_endpoints/test_get.py b/test/test_paths/test_v1_llm_model_endpoints/test_get.py deleted file mode 100644 index c50c4555..00000000 --- a/test/test_paths/test_v1_llm_model_endpoints/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_model_endpoints import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1LlmModelEndpoints(ApiTestMixin, unittest.TestCase): - """ - V1LlmModelEndpoints unit test stubs - List Model Endpoints # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints/test_post.py b/test/test_paths/test_v1_llm_model_endpoints/test_post.py deleted file mode 100644 index 3981e312..00000000 --- a/test/test_paths/test_v1_llm_model_endpoints/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_model_endpoints import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1LlmModelEndpoints(ApiTestMixin, unittest.TestCase): - """ - V1LlmModelEndpoints unit test stubs - Create Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints_download/__init__.py b/test/test_paths/test_v1_llm_model_endpoints_download/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_model_endpoints_download/test_post.py b/test/test_paths/test_v1_llm_model_endpoints_download/test_post.py deleted file mode 100644 index f1fb9111..00000000 --- a/test/test_paths/test_v1_llm_model_endpoints_download/test_post.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_model_endpoints_download import ( # noqa: E501 - post, -) - -from .. 
import ApiTestMixin - - -class TestV1LlmModelEndpointsDownload(ApiTestMixin, unittest.TestCase): - """ - V1LlmModelEndpointsDownload unit test stubs - Download Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/__init__.py b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_delete.py b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_delete.py deleted file mode 100644 index 455d45e9..00000000 --- a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_delete.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name import ( # noqa: E501 - delete, -) - -from .. 
import ApiTestMixin - - -class TestV1LlmModelEndpointsModelEndpointName(ApiTestMixin, unittest.TestCase): - """ - V1LlmModelEndpointsModelEndpointName unit test stubs - Delete Llm Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = delete.ApiFordelete(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_get.py b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_get.py deleted file mode 100644 index 00c8b86e..00000000 --- a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1LlmModelEndpointsModelEndpointName(ApiTestMixin, unittest.TestCase): - """ - V1LlmModelEndpointsModelEndpointName unit test stubs - Get Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_put.py b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_put.py deleted file mode 100644 index 0b90da8e..00000000 --- a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_put.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name import ( # noqa: E501 - put, -) - -from .. 
import ApiTestMixin - - -class TestV1LlmModelEndpointsModelEndpointName(ApiTestMixin, unittest.TestCase): - """ - V1LlmModelEndpointsModelEndpointName unit test stubs - Update Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_bundles/__init__.py b/test/test_paths/test_v1_model_bundles/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_model_bundles/test_get.py b/test/test_paths/test_v1_model_bundles/test_get.py deleted file mode 100644 index 9a3f702d..00000000 --- a/test/test_paths/test_v1_model_bundles/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_bundles import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1ModelBundles(ApiTestMixin, unittest.TestCase): - """ - V1ModelBundles unit test stubs - List Model Bundles # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_bundles/test_post.py b/test/test_paths/test_v1_model_bundles/test_post.py deleted file mode 100644 index a57ac049..00000000 --- a/test/test_paths/test_v1_model_bundles/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_bundles import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1ModelBundles(ApiTestMixin, unittest.TestCase): - """ - V1ModelBundles unit test stubs - Create Model Bundle # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_bundles_clone_with_changes/__init__.py b/test/test_paths/test_v1_model_bundles_clone_with_changes/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_model_bundles_clone_with_changes/test_post.py b/test/test_paths/test_v1_model_bundles_clone_with_changes/test_post.py deleted file mode 100644 index 94e37019..00000000 --- a/test/test_paths/test_v1_model_bundles_clone_with_changes/test_post.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_bundles_clone_with_changes import ( # noqa: E501 - post, -) - -from .. 
import ApiTestMixin - - -class TestV1ModelBundlesCloneWithChanges(ApiTestMixin, unittest.TestCase): - """ - V1ModelBundlesCloneWithChanges unit test stubs - Clone Model Bundle With Changes # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_bundles_latest/__init__.py b/test/test_paths/test_v1_model_bundles_latest/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_model_bundles_latest/test_get.py b/test/test_paths/test_v1_model_bundles_latest/test_get.py deleted file mode 100644 index cc964745..00000000 --- a/test/test_paths/test_v1_model_bundles_latest/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_bundles_latest import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1ModelBundlesLatest(ApiTestMixin, unittest.TestCase): - """ - V1ModelBundlesLatest unit test stubs - Get Latest Model Bundle # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_bundles_model_bundle_id/__init__.py b/test/test_paths/test_v1_model_bundles_model_bundle_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_model_bundles_model_bundle_id/test_get.py b/test/test_paths/test_v1_model_bundles_model_bundle_id/test_get.py deleted file mode 100644 index 602fe975..00000000 --- a/test/test_paths/test_v1_model_bundles_model_bundle_id/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_bundles_model_bundle_id import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1ModelBundlesModelBundleId(ApiTestMixin, unittest.TestCase): - """ - V1ModelBundlesModelBundleId unit test stubs - Get Model Bundle # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints/__init__.py b/test/test_paths/test_v1_model_endpoints/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_model_endpoints/test_get.py b/test/test_paths/test_v1_model_endpoints/test_get.py deleted file mode 100644 index c000bf21..00000000 --- a/test/test_paths/test_v1_model_endpoints/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_endpoints import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1ModelEndpoints(ApiTestMixin, unittest.TestCase): - """ - V1ModelEndpoints unit test stubs - List Model Endpoints # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints/test_post.py b/test/test_paths/test_v1_model_endpoints/test_post.py deleted file mode 100644 index 880fdbde..00000000 --- a/test/test_paths/test_v1_model_endpoints/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_endpoints import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1ModelEndpoints(ApiTestMixin, unittest.TestCase): - """ - V1ModelEndpoints unit test stubs - Create Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_api/__init__.py b/test/test_paths/test_v1_model_endpoints_api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_model_endpoints_api/test_get.py b/test/test_paths/test_v1_model_endpoints_api/test_get.py deleted file mode 100644 index d88851d1..00000000 --- a/test/test_paths/test_v1_model_endpoints_api/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_endpoints_api import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1ModelEndpointsApi(ApiTestMixin, unittest.TestCase): - """ - V1ModelEndpointsApi unit test stubs - Get Model Endpoints Api # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/__init__.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_delete.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_delete.py deleted file mode 100644 index 5cbb222b..00000000 --- a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_delete.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id import ( # noqa: E501 - delete, -) - -from .. 
import ApiTestMixin - - -class TestV1ModelEndpointsModelEndpointId(ApiTestMixin, unittest.TestCase): - """ - V1ModelEndpointsModelEndpointId unit test stubs - Delete Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = delete.ApiFordelete(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_get.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_get.py deleted file mode 100644 index 9e6ff427..00000000 --- a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1ModelEndpointsModelEndpointId(ApiTestMixin, unittest.TestCase): - """ - V1ModelEndpointsModelEndpointId unit test stubs - Get Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_put.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_put.py deleted file mode 100644 index 4be6978d..00000000 --- a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_put.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id import ( # noqa: E501 - put, -) - -from .. 
import ApiTestMixin - - -class TestV1ModelEndpointsModelEndpointId(ApiTestMixin, unittest.TestCase): - """ - V1ModelEndpointsModelEndpointId unit test stubs - Update Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/__init__.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/test_post.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/test_post.py deleted file mode 100644 index 79790def..00000000 --- a/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/test_post.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_endpoints_model_endpoint_id_restart import ( # noqa: E501 - post, -) - -from .. 
import ApiTestMixin - - -class TestV1ModelEndpointsModelEndpointIdRestart(ApiTestMixin, unittest.TestCase): - """ - V1ModelEndpointsModelEndpointIdRestart unit test stubs - Restart Model Endpoint # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_schema_json/__init__.py b/test/test_paths/test_v1_model_endpoints_schema_json/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_model_endpoints_schema_json/test_get.py b/test/test_paths/test_v1_model_endpoints_schema_json/test_get.py deleted file mode 100644 index 09f6fb0c..00000000 --- a/test/test_paths/test_v1_model_endpoints_schema_json/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_model_endpoints_schema_json import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV1ModelEndpointsSchemaJson(ApiTestMixin, unittest.TestCase): - """ - V1ModelEndpointsSchemaJson unit test stubs - Get Model Endpoints Schema # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_streaming_tasks/__init__.py b/test/test_paths/test_v1_streaming_tasks/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_streaming_tasks/test_post.py b/test/test_paths/test_v1_streaming_tasks/test_post.py deleted file mode 100644 index 89394b48..00000000 --- a/test/test_paths/test_v1_streaming_tasks/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_streaming_tasks import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1StreamingTasks(ApiTestMixin, unittest.TestCase): - """ - V1StreamingTasks unit test stubs - Create Streaming Inference Task # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_sync_tasks/__init__.py b/test/test_paths/test_v1_sync_tasks/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_sync_tasks/test_post.py b/test/test_paths/test_v1_sync_tasks/test_post.py deleted file mode 100644 index 09bd6339..00000000 --- a/test/test_paths/test_v1_sync_tasks/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_sync_tasks import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1SyncTasks(ApiTestMixin, unittest.TestCase): - """ - V1SyncTasks unit test stubs - Create Sync Inference Task # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_triggers/__init__.py b/test/test_paths/test_v1_triggers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_triggers/test_get.py b/test/test_paths/test_v1_triggers/test_get.py deleted file mode 100644 index a45b402d..00000000 --- a/test/test_paths/test_v1_triggers/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_triggers import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1Triggers(ApiTestMixin, unittest.TestCase): - """ - V1Triggers unit test stubs - List Triggers # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_triggers/test_post.py b/test/test_paths/test_v1_triggers/test_post.py deleted file mode 100644 index a27a778a..00000000 --- a/test/test_paths/test_v1_triggers/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_triggers import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1Triggers(ApiTestMixin, unittest.TestCase): - """ - V1Triggers unit test stubs - Create Trigger # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_triggers_trigger_id/__init__.py b/test/test_paths/test_v1_triggers_trigger_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v1_triggers_trigger_id/test_delete.py b/test/test_paths/test_v1_triggers_trigger_id/test_delete.py deleted file mode 100644 index 48efdb01..00000000 --- a/test/test_paths/test_v1_triggers_trigger_id/test_delete.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_triggers_trigger_id import delete # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1TriggersTriggerId(ApiTestMixin, unittest.TestCase): - """ - V1TriggersTriggerId unit test stubs - Delete Trigger # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = delete.ApiFordelete(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_triggers_trigger_id/test_get.py b/test/test_paths/test_v1_triggers_trigger_id/test_get.py deleted file mode 100644 index 2f8a87ed..00000000 --- a/test/test_paths/test_v1_triggers_trigger_id/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_triggers_trigger_id import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1TriggersTriggerId(ApiTestMixin, unittest.TestCase): - """ - V1TriggersTriggerId unit test stubs - Get Trigger # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v1_triggers_trigger_id/test_put.py b/test/test_paths/test_v1_triggers_trigger_id/test_put.py deleted file mode 100644 index 65d21839..00000000 --- a/test/test_paths/test_v1_triggers_trigger_id/test_put.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v1_triggers_trigger_id import put # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV1TriggersTriggerId(ApiTestMixin, unittest.TestCase): - """ - V1TriggersTriggerId unit test stubs - Update Trigger # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_batch_completions/__init__.py b/test/test_paths/test_v2_batch_completions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v2_batch_completions/test_post.py b/test/test_paths/test_v2_batch_completions/test_post.py deleted file mode 100644 index 5c808bab..00000000 --- a/test/test_paths/test_v2_batch_completions/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_batch_completions import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV2BatchCompletions(ApiTestMixin, unittest.TestCase): - """ - V2BatchCompletions unit test stubs - Batch Completions # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id/__init__.py b/test/test_paths/test_v2_batch_completions_batch_completion_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id/test_get.py b/test/test_paths/test_v2_batch_completions_batch_completion_id/test_get.py deleted file mode 100644 index f95ae8dd..00000000 --- a/test/test_paths/test_v2_batch_completions_batch_completion_id/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_batch_completions_batch_completion_id import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV2BatchCompletionsBatchCompletionId(ApiTestMixin, unittest.TestCase): - """ - V2BatchCompletionsBatchCompletionId unit test stubs - Get Batch Completion # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id/test_post.py b/test/test_paths/test_v2_batch_completions_batch_completion_id/test_post.py deleted file mode 100644 index 880f6f2d..00000000 --- a/test/test_paths/test_v2_batch_completions_batch_completion_id/test_post.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_batch_completions_batch_completion_id import ( # noqa: E501 - post, -) - -from .. 
import ApiTestMixin - - -class TestV2BatchCompletionsBatchCompletionId(ApiTestMixin, unittest.TestCase): - """ - V2BatchCompletionsBatchCompletionId unit test stubs - Update Batch Completion # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/__init__.py b/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/test_post.py b/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/test_post.py deleted file mode 100644 index 377c68ed..00000000 --- a/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/test_post.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_batch_completions_batch_completion_id_actions_cancel import ( # noqa: E501 - post, -) - -from .. 
import ApiTestMixin - - -class TestV2BatchCompletionsBatchCompletionIdActionsCancel(ApiTestMixin, unittest.TestCase): - """ - V2BatchCompletionsBatchCompletionIdActionsCancel unit test stubs - Cancel Batch Completion # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_chat_completions/__init__.py b/test/test_paths/test_v2_chat_completions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v2_chat_completions/test_post.py b/test/test_paths/test_v2_chat_completions/test_post.py deleted file mode 100644 index 56741b47..00000000 --- a/test/test_paths/test_v2_chat_completions/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_chat_completions import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV2ChatCompletions(ApiTestMixin, unittest.TestCase): - """ - V2ChatCompletions unit test stubs - Chat Completion # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_completions/__init__.py b/test/test_paths/test_v2_completions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v2_completions/test_post.py b/test/test_paths/test_v2_completions/test_post.py deleted file mode 100644 index de8a3262..00000000 --- a/test/test_paths/test_v2_completions/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_completions import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV2Completions(ApiTestMixin, unittest.TestCase): - """ - V2Completions unit test stubs - Completion # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_model_bundles/__init__.py b/test/test_paths/test_v2_model_bundles/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v2_model_bundles/test_get.py b/test/test_paths/test_v2_model_bundles/test_get.py deleted file mode 100644 index 34a4f0a9..00000000 --- a/test/test_paths/test_v2_model_bundles/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_model_bundles import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV2ModelBundles(ApiTestMixin, unittest.TestCase): - """ - V2ModelBundles unit test stubs - List Model Bundles # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_model_bundles/test_post.py b/test/test_paths/test_v2_model_bundles/test_post.py deleted file mode 100644 index 56804d0c..00000000 --- a/test/test_paths/test_v2_model_bundles/test_post.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_model_bundles import post # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV2ModelBundles(ApiTestMixin, unittest.TestCase): - """ - V2ModelBundles unit test stubs - Create Model Bundle # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_model_bundles_clone_with_changes/__init__.py b/test/test_paths/test_v2_model_bundles_clone_with_changes/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v2_model_bundles_clone_with_changes/test_post.py b/test/test_paths/test_v2_model_bundles_clone_with_changes/test_post.py deleted file mode 100644 index f48288c0..00000000 --- a/test/test_paths/test_v2_model_bundles_clone_with_changes/test_post.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_model_bundles_clone_with_changes import ( # noqa: E501 - post, -) - -from .. 
import ApiTestMixin - - -class TestV2ModelBundlesCloneWithChanges(ApiTestMixin, unittest.TestCase): - """ - V2ModelBundlesCloneWithChanges unit test stubs - Clone Model Bundle With Changes # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_model_bundles_latest/__init__.py b/test/test_paths/test_v2_model_bundles_latest/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v2_model_bundles_latest/test_get.py b/test/test_paths/test_v2_model_bundles_latest/test_get.py deleted file mode 100644 index 566fa232..00000000 --- a/test/test_paths/test_v2_model_bundles_latest/test_get.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_model_bundles_latest import get # noqa: E501 - -from .. 
import ApiTestMixin - - -class TestV2ModelBundlesLatest(ApiTestMixin, unittest.TestCase): - """ - V2ModelBundlesLatest unit test stubs - Get Latest Model Bundle # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_paths/test_v2_model_bundles_model_bundle_id/__init__.py b/test/test_paths/test_v2_model_bundles_model_bundle_id/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/test/test_paths/test_v2_model_bundles_model_bundle_id/test_get.py b/test/test_paths/test_v2_model_bundles_model_bundle_id/test_get.py deleted file mode 100644 index 8383ba28..00000000 --- a/test/test_paths/test_v2_model_bundles_model_bundle_id/test_get.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import unittest -from unittest.mock import patch - -import urllib3 - -import launch.api_client -from launch.api_client import api_client, configuration, schemas -from launch.api_client.paths.v2_model_bundles_model_bundle_id import ( # noqa: E501 - get, -) - -from .. 
import ApiTestMixin - - -class TestV2ModelBundlesModelBundleId(ApiTestMixin, unittest.TestCase): - """ - V2ModelBundlesModelBundleId unit test stubs - Get Model Bundle # noqa: E501 - """ - - _configuration = configuration.Configuration() - - def setUp(self): - used_api_client = api_client.ApiClient(configuration=self._configuration) - self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 - - def tearDown(self): - pass - - response_status = 200 - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index 9419215e..00000000 --- a/tests/conftest.py +++ /dev/null @@ -1,17 +0,0 @@ -import os -from pathlib import Path - -import pytest - - -@pytest.fixture -def tmp_work_path(tmp_path: Path): - """ - Create a temporary working directory. - """ - previous_cwd = Path.cwd() - os.chdir(tmp_path) - - yield tmp_path - - os.chdir(previous_cwd) diff --git a/tests/test_client.py b/tests/test_client.py deleted file mode 100644 index 49f55560..00000000 --- a/tests/test_client.py +++ /dev/null @@ -1,96 +0,0 @@ -import io -import json -import os -import shutil -import tempfile -from unittest.mock import MagicMock -from zipfile import ZipFile - -import pytest -from urllib3 import HTTPResponse - -import launch -from launch.api_client.api_client import ApiResponseWithoutDeserialization - - -def _get_mock_client(): - client = launch.LaunchClient(api_key="test") - return client - - -@pytest.fixture() -def fake_project_dir(): - tmpdir = tempfile.mkdtemp() - try: - os.mkdir(os.path.join(tmpdir, "project_root")) - os.mkdir(os.path.join(tmpdir, "project_root", "my_module1")) - - with open( - os.path.join(tmpdir, "project_root", "my_module1", "requirements.txt"), - "w", - ): - pass - - os.mkdir(os.path.join(tmpdir, "project_root", "my_module2")) - with open( - os.path.join(tmpdir, "project_root", "my_module2", 
"foobar.txt"), - "w", - ): - pass - yield tmpdir - finally: - shutil.rmtree(tmpdir) - - -def test_create_model_bundle_from_dirs_bundle_contents_correct(requests_mock, fake_project_dir): # noqa: F811 - def check_bundle_upload_data(request): - request_body = request._request.body - zf = ZipFile(io.BytesIO(request_body), "r") - try: - actual_zip_filenames = set(zf.namelist()) - assert actual_zip_filenames == set(["my_module1/requirements.txt", "my_module2/foobar.txt"]) - return True - finally: - zf.close() - - requests_mock.post( - "https://api.scale.com/v1/hosted_inference/model_bundle_upload", - json={ - "signedUrl": "s3://my-fake-bucket/path/to/bundle", - "bucket": "my-fake-bucket", - "key": "path/to/bundle", - }, - ) - requests_mock.put( - "s3://my-fake-bucket/path/to/bundle", - additional_matcher=check_bundle_upload_data, - ) - launch.client.DefaultApi = MagicMock() - - client = _get_mock_client() - client.create_model_bundle_from_dirs( - model_bundle_name="my_test_bundle", - base_paths=[ - os.path.join(fake_project_dir, "project_root/my_module1"), - os.path.join(fake_project_dir, "project_root/my_module2"), - ], - requirements_path=os.path.join(fake_project_dir, "project_root/my_module1/requirements.txt"), - env_params={ - "framework_type": "pytorch", - "pytorch_image_tag": "1.10.0-cuda11.3-cudnn8-runtime", - }, - load_predict_fn_module_path="a.b.c", - load_model_fn_module_path="a.b.c", - app_config=None, - ) - - -def test_get_non_existent_model_endpoint(requests_mock): # noqa: F811 - client = _get_mock_client() - mock_api_client = MagicMock() - mock_api_client.list_model_endpoints_v1_model_endpoints_get.return_value = ApiResponseWithoutDeserialization( - response=HTTPResponse(body=json.dumps(dict(model_endpoints=[])), status=200) - ) - launch.client.DefaultApi = MagicMock(return_value=mock_api_client) - endpoint = client.get_model_endpoint("non-existent-endpoint") - assert endpoint is None diff --git a/tests/test_docs.py b/tests/test_docs.py deleted file mode 
100644 index 07111bbd..00000000 --- a/tests/test_docs.py +++ /dev/null @@ -1,172 +0,0 @@ -import importlib.util -import re -from pathlib import Path -from textwrap import dedent -from unittest.mock import MagicMock, Mock - -import pytest -from _pytest.assertion.rewrite import AssertionRewritingHook - -from launch.api_client.model.completion_output import CompletionOutput -from launch.api_client.model.completion_sync_v1_response import ( - CompletionSyncV1Response, -) -from launch.model_bundle import ModelBundle -from launch.model_endpoint import AsyncEndpoint, ModelEndpoint, SyncEndpoint - -ROOT_DIR = Path(__file__).parent.parent - -TEST_SKIP_MAGIC_STRING = "# test='skip'" - - -@pytest.fixture -def import_execute(request, tmp_work_path: Path): - def _import_execute(module_name: str, source: str, rewrite_assertions: bool = False): - if rewrite_assertions: - loader = AssertionRewritingHook(config=request.config) - loader.mark_rewrite(module_name) - else: - loader = None - - module_path = tmp_work_path / f"{module_name}.py" - module_path.write_text(source) - spec = importlib.util.spec_from_file_location("__main__", str(module_path), loader=loader) - module = importlib.util.module_from_spec(spec) - try: - spec.loader.exec_module(module) - except KeyboardInterrupt: - print("KeyboardInterrupt") - - return _import_execute - - -def extract_code_chunks(path: Path, text: str, offset: int): - rel_path = path.relative_to(ROOT_DIR) - for m_code in re.finditer(r"^```(.*?)$\n(.*?)^```", text, flags=re.M | re.S): - prefix = m_code.group(1).lower() - if not prefix.startswith(("py", "{.py")): - continue - - start_line = offset + text[: m_code.start()].count("\n") + 1 - code = m_code.group(2) - if TEST_SKIP_MAGIC_STRING in code: - code = code.replace(TEST_SKIP_MAGIC_STRING, "") - start_line += 1 - end_line = start_line + code.count("\n") + 1 - source = "__skip__" - else: - end_line = start_line + code.count("\n") + 1 - source = "\n" * start_line + code - yield 
pytest.param(f"{path.stem}_{start_line}_{end_line}", source, id=f"{rel_path}:{start_line}-{end_line}") - - -def generate_code_chunks(*directories: str): - for d in directories: - for path in (ROOT_DIR / d).glob("**/*"): - if path.suffix == ".py": - code = path.read_text() - for m_docstring in re.finditer(r'(^\s*)r?"""$(.*?)\1"""', code, flags=re.M | re.S): - start_line = code[: m_docstring.start()].count("\n") - docstring = dedent(m_docstring.group(2)) - yield from extract_code_chunks(path, docstring, start_line) - elif path.suffix == ".md": - code = path.read_text() - yield from extract_code_chunks(path, code, 0) - - -@pytest.fixture -def mock_dictionary(): - mock = MagicMock() - mock.__getitem__.side_effect = lambda key: mock - return mock - - -@pytest.fixture -def mock_async_endpoint() -> AsyncEndpoint: - mock = Mock(spec=AsyncEndpoint) - mock.model_endpoint = Mock(spec=ModelEndpoint) - mock.model_endpoint.id = "test-endpoint" - mock.status = Mock(return_value="READY") - return mock - - -@pytest.fixture -def mock_model_bundle() -> ModelBundle: - mock = Mock(spec=ModelBundle) - mock.id = "test-bundle" - return mock - - -@pytest.fixture -def mock_batch_job(): - return {"job_id": "test-batch-job", "status": "SUCCESS"} - - -@pytest.fixture -def mock_list_llm_model_endpoints(): - mock = Mock(spec=SyncEndpoint) - mock.model_endpoint = Mock(spec=ModelEndpoint) - mock.model_endpoint.id = "test-endpoint" - mock.status = Mock(return_value="READY") - return [mock] - - -@pytest.fixture -def mock_completions_sync(): - return CompletionSyncV1Response( - output=CompletionOutput( - text="Deep learning is a subnet of machine learning.", num_prompt_tokens=10, num_completion_tokens=9 - ), - request_id="test-request-id", - ) - - -@pytest.mark.parametrize("module_name,source_code", generate_code_chunks("launch", "docs")) -def test_docs_examples( - module_name, - source_code, - import_execute, - mocker, - mock_dictionary, - mock_model_bundle, - mock_async_endpoint, - mock_batch_job, 
- mock_list_llm_model_endpoints, - mock_completions_sync, -): - mocker.patch("launch.connection.Connection", MagicMock()) - mocker.patch("launch.client.DefaultApi", MagicMock()) - mocker.patch("launch.model_endpoint.DefaultApi", MagicMock()) - mocker.patch("json.loads", MagicMock(return_value=mock_dictionary)) - mocker.patch("launch.model_bundle.ModelBundle.from_dict", MagicMock()) - mocker.patch("launch.model_endpoint.ModelEndpoint.from_dict", MagicMock()) - mocker.patch("launch.client.LaunchClient.get_model_bundle", MagicMock(return_value=mock_model_bundle)) - mocker.patch("launch.client.LaunchClient.get_model_endpoint", MagicMock(return_value=mock_async_endpoint)) - mocker.patch("launch.client.LaunchClient.create_model_bundle", MagicMock(return_value=mock_model_bundle)) - mocker.patch("launch.client.LaunchClient.create_model_endpoint", MagicMock(return_value=mock_async_endpoint)) - mocker.patch("launch.client.LaunchClient.get_batch_async_response", MagicMock(return_value=mock_batch_job)) - mocker.patch( - "launch.client.LaunchClient.list_llm_model_endpoints", MagicMock(return_value=mock_list_llm_model_endpoints) - ) - mocker.patch("launch.client.LaunchClient.create_llm_model_endpoint", MagicMock(return_value=mock_async_endpoint)) - mocker.patch("launch.client.LaunchClient.completions_sync", MagicMock(return_value=mock_completions_sync)) - mocker.patch("launch.client.Connection.make_request", MagicMock(return_value=mock_dictionary)) - mocker.patch("launch.client.requests", MagicMock()) - mocker.patch("pydantic.BaseModel.parse_raw", MagicMock()) - - if source_code == "__skip__": - pytest.skip("test='skip' on code snippet") - - async def dont_aiosleep(t): - pass - - async def dont_sleep(t): - pass - - mocker.patch("asyncio.sleep", new=dont_aiosleep) - mocker.patch("time.sleep", new=dont_sleep) - - try: - import_execute(module_name, source_code, True) - except Exception: - raise diff --git a/tests/test_import_lib.py b/tests/test_import_lib.py deleted file mode 
100644 index deb4049a..00000000 --- a/tests/test_import_lib.py +++ /dev/null @@ -1,5 +0,0 @@ -from launch import LaunchClient # noqa - - -def test_dummy(): - assert 3.14 < 42 diff --git a/tests/test_make_batch_file.py b/tests/test_make_batch_file.py deleted file mode 100644 index 5a054418..00000000 --- a/tests/test_make_batch_file.py +++ /dev/null @@ -1,21 +0,0 @@ -import csv -from io import StringIO - -from launch.make_batch_file import make_batch_input_file - - -def test_make_batch_file(): - f = StringIO() - urls = ["one_url.count", "two_urls.count", "three_urls.count"] - make_batch_input_file(urls, f) - f.seek(0) - - reader = csv.DictReader(f) - rows = [row for row in reader] - print(f.getvalue()) - print(rows) - for tup in zip(enumerate(urls), rows): - print(tup) - (i, expected_row), actual_row = tup - assert str(i) == actual_row["id"] - assert expected_row == actual_row["url"] diff --git a/tests/test_model_endpoint.py b/tests/test_model_endpoint.py deleted file mode 100644 index 809e7cd9..00000000 --- a/tests/test_model_endpoint.py +++ /dev/null @@ -1,77 +0,0 @@ -import json -from datetime import datetime -from unittest.mock import MagicMock - -import requests -import requests_mock -from urllib3 import HTTPResponse - -import launch -from launch.api_client.api_client import ApiResponseWithoutDeserialization - - -def _get_mock_client(): - client = launch.LaunchClient(api_key="test") - return client - - -def test_status_returns_updated_value(requests_mock): # noqa: F811 - client = _get_mock_client() - - resp = dict( - model_endpoints=[ - dict( - bundle_name="test-returns-1", - configs=dict( - app_config=None, - endpoint_config=dict( - bundle_name="test-returns-1", - endpoint_name="test-endpoint", - post_inference_hooks=None, - ), - ), - destination="launch.xxx", - endpoint_type="async", - metadata={}, - name="test-endpoint", - resource_state=dict( - cpus="2", - gpus=0, - memory="4Gi", - ), - deployment_state=dict( - available_workers=1, - max_workers=1, - 
min_workers=1, - per_worker=1, - unavailable_workers=0, - ), - status="UPDATE_PENDING", - created_at=str(datetime.now()), - last_updated_at=str(datetime.now()), - created_by="test", - id="test", - ) - ] - ) - - mock_api_client = MagicMock() - launch.client.DefaultApi = MagicMock(return_value=mock_api_client) - launch.model_endpoint.DefaultApi = MagicMock(return_value=mock_api_client) - mock_api_client.list_model_endpoints_v1_model_endpoints_get.return_value = ApiResponseWithoutDeserialization( - response=HTTPResponse(body=json.dumps(resp), status=200) - ) - endpoint = client.get_model_endpoint("test-endpoint") # UPDATE_PENDING - assert endpoint.status() == "UPDATE_PENDING" - - resp["model_endpoints"][0]["status"] = "UPDATE_IN_PROGRESS" - mock_api_client.list_model_endpoints_v1_model_endpoints_get.return_value = ApiResponseWithoutDeserialization( - response=HTTPResponse(body=json.dumps(resp), status=200) - ) - assert endpoint.status() == "UPDATE_IN_PROGRESS" - - resp["model_endpoints"][0]["status"] = "SUCCESS" - mock_api_client.list_model_endpoints_v1_model_endpoints_get.return_value = ApiResponseWithoutDeserialization( - response=HTTPResponse(body=json.dumps(resp), status=200) - ) - assert endpoint.status() == "SUCCESS" diff --git a/tests/test_pydantic_schemas.py b/tests/test_pydantic_schemas.py deleted file mode 100644 index 7222312a..00000000 --- a/tests/test_pydantic_schemas.py +++ /dev/null @@ -1,106 +0,0 @@ -from typing import List, Optional - -import pydantic -import pytest -from pydantic import BaseModel, RootModel - -PYDANTIC_V2 = hasattr(pydantic, "VERSION") and pydantic.VERSION.startswith("2.") - -from launch.pydantic_schemas import ( - get_model_definitions, - get_model_definitions_from_flat_models, -) - - -@pytest.mark.skipif(PYDANTIC_V2, reason="Only for Pydantic v1") -def test_get_model_definitions_v1(): - class MyRequestSubSchemaB(BaseModel): - query: str - language: str - - class MyRequestSchemaB(BaseModel): - queries_and_languages: 
List[MyRequestSubSchemaB] - temperature: Optional[float] - - class MyResponseSchemaB(BaseModel): - responses: List[str] - total_num_tokens: int - time_elapsed: float - - result = get_model_definitions(request_schema=MyRequestSchemaB, response_schema=MyResponseSchemaB) - - expected = { - "MyRequestSubSchemaB": { - "title": "MyRequestSubSchemaB", - "type": "object", - "properties": { - "query": {"title": "Query", "type": "string"}, - "language": {"title": "Language", "type": "string"}, - }, - "required": ["query", "language"], - }, - "RequestSchema": { - "title": "MyRequestSchemaB", - "type": "object", - "properties": { - "queries_and_languages": { - "title": "Queries And Languages", - "type": "array", - "items": {"$ref": "#/components/schemas/MyRequestSubSchemaB"}, - }, - "temperature": {"title": "Temperature", "type": "number"}, - }, - "required": ["queries_and_languages"], - }, - "ResponseSchema": { - "title": "MyResponseSchemaB", - "type": "object", - "properties": { - "responses": { - "title": "Responses", - "type": "array", - "items": {"type": "string"}, - }, - "total_num_tokens": { - "title": "Total Num Tokens", - "type": "integer", - }, - "time_elapsed": {"title": "Time Elapsed", "type": "number"}, - }, - "required": ["responses", "total_num_tokens", "time_elapsed"], - }, - } - - assert result == expected - - -@pytest.mark.skipif(PYDANTIC_V2, reason="Only for Pydantic v1") -def test_get_model_definitions_from_flat_models(): - class MyRequestSchema(BaseModel): - x: int - y: str - - class MyResponseSchema(RootModel): - root: int - - flat_models = {MyRequestSchema, MyResponseSchema} - model_name_map = { - MyRequestSchema: "RequestSchema", - MyResponseSchema: "ResponseSchema", - } - - result = get_model_definitions_from_flat_models(flat_models=flat_models, model_name_map=model_name_map) - expected = { - "RequestSchema": { - "title": "MyRequestSchema", - "type": "object", - "properties": { - "x": {"title": "X", "type": "integer"}, - "y": {"title": "Y", "type": 
"string"}, - }, - "required": ["x", "y"], - }, - "ResponseSchema": {"title": "MyResponseSchema", "type": "integer"}, - } - - assert result == expected diff --git a/tests/test_utils.py b/tests/test_utils.py deleted file mode 100644 index ba100163..00000000 --- a/tests/test_utils.py +++ /dev/null @@ -1,20 +0,0 @@ -from launch.utils import trim_kwargs - - -def test_trim_kwargs(): - kwargs1 = {"cpus": 0.5, "gpus": None, "memory": "3Gi"} - expected1 = {"cpus": 0.5, "memory": "3Gi"} - - kwargs2 = {"cpus": 0.5, "memory": "3Gi"} - expected2 = {"cpus": 0.5, "memory": "3Gi"} - - kwargs3 = {} - expected3 = {} - - kwargs4 = {1: 2, 3: "", 4: 0, 5: None} - expected4 = {1: 2, 3: "", 4: 0} - - assert trim_kwargs(kwargs1) == expected1 - assert trim_kwargs(kwargs2) == expected2 - assert trim_kwargs(kwargs3) == expected3 - assert trim_kwargs(kwargs4) == expected4